parquet-converter committed on
Commit 25bfda3 · 1 Parent(s): a0907c0

Update parquet files (step 20 of 249)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. spaces/1phancelerku/anime-remove-background/Crossword Puzzle APK The Best App for Relaxing and Unwinding with Crosswords.md +0 -80
  2. spaces/1toTree/lora_test/ppdiffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py +0 -346
  3. spaces/44ov41za8i/FreeVC/app.py +0 -103
  4. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/model_param_init.py +0 -69
  5. spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/app.py +0 -267
  6. spaces/AIARTCHAN/openpose_editor/style.css +0 -28
  7. spaces/AIConsultant/MusicGen/audiocraft/metrics/kld.py +0 -218
  8. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py +0 -138
  9. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/util/learning_rates.py +0 -70
  10. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/base.py +0 -30
  11. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogresscanvas/CircularProgressCanvas.js +0 -2
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Container.d.ts +0 -2
  13. spaces/AkitoP/umamusume_bert_vits2/attentions.py +0 -464
  14. spaces/AkitoP/umamusume_bert_vits2/losses.py +0 -58
  15. spaces/Akmyradov/TurkmenTTSweSTT/README.md +0 -14
  16. spaces/AlexZou/Deploy_Restoration/net/SGFMT.py +0 -126
  17. spaces/Amon1/ChatGPTForAcadamic/theme.py +0 -152
  18. spaces/Amrrs/DragGan-Inversion/torch_utils/ops/__init__.py +0 -9
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +0 -747
  20. spaces/Andy1621/uniformer_image_detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py +0 -4
  21. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py +0 -9
  22. spaces/Andy1621/uniformer_video_demo/app.py +0 -127
  23. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/monkey_patch_gptq_lora.py +0 -39
  24. spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/utils.py +0 -189
  25. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/README.md +0 -174
  26. spaces/Ash2219/AIchatbot/app.py +0 -164
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/control.py +0 -225
  28. spaces/Bart92/RVC_HF/Dockerfile +0 -29
  29. spaces/Benson/text-generation/Examples/Choque De Clanes Nulos.md +0 -115
  30. spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Apk Avance Servidor.md +0 -100
  31. spaces/BigSalmon/InformalToFormal/app.py +0 -58
  32. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/register_coco.py +0 -125
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/transforms/transform.py +0 -139
  34. spaces/CVPR/DualStyleGAN/app.py +0 -204
  35. spaces/CVPR/LIVE/thrust/cmake/PrintNinjaBuildTimes.cmake +0 -101
  36. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/get_value.h +0 -23
  37. spaces/CikeyQI/meme-api/meme_generator/memes/marriage/__init__.py +0 -27
  38. spaces/CodingBillionaire/bark-voice-cloning/hubert/pre_kmeans_hubert.py +0 -85
  39. spaces/CofAI/tv/public/index.html +0 -325
  40. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/image_degradation/bsrgan_light.py +0 -651
  41. spaces/DEBO-PROJECT/DEBO-V1/modules/whisper_modules.py +0 -75
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/helpers.py +0 -959
  43. spaces/DragGan/DragGan-Inversion/stylegan_human/edit.py +0 -207
  44. spaces/Dragonnext/Unicorn-proxy/README.md +0 -10
  45. spaces/Eddycrack864/Applio-Inference/demucs/test.py +0 -109
  46. spaces/EronSamez/RVC_HFmeu/diffq/uniform.py +0 -121
  47. spaces/EuroPython2022/latr-vqa/app.py +0 -148
  48. spaces/Faridmaruf/rvc-genshin-v2/lib/infer_pack/onnx_inference.py +0 -145
  49. spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/waifu-tips.js +0 -405
  50. spaces/Flux9665/IMS-Toucan/Preprocessing/AudioPreprocessor.py +0 -166
spaces/1phancelerku/anime-remove-background/Crossword Puzzle APK The Best App for Relaxing and Unwinding with Crosswords.md DELETED
@@ -1,80 +0,0 @@
1
-
2
- <h1>Crossword Puzzle APK: A Fun and Challenging Game for Your Brain</h1>
3
- <p>Do you love word games? Do you enjoy solving puzzles and learning new things? If you answered yes, then you should try playing crossword puzzle apk. This is an app that lets you play crossword puzzles on your Android device. Whether you are a beginner or an expert, you will find crossword puzzles that suit your level and interest. In this article, we will tell you what a crossword puzzle apk is, why you should play it, how to play it, and what are some of its features.</p>
4
- <h2>cross word puzzle apk</h2><br /><p><b><b>Download File</b> &#9989; <a href="https://jinyurl.com/2uNLFK">https://jinyurl.com/2uNLFK</a></b></p><br /><br />
5
- <h2>What is a crossword puzzle apk?</h2>
6
- <h3>An apk file is a format for installing applications on Android devices</h3>
7
- <p>An apk file is short for Android Package Kit. It is a file format that contains all the elements needed to install an application on an Android device. You can download apk files from various sources, such as websites, app stores, or file-sharing platforms. However, you should be careful about the source of the apk file, as some may contain malware or viruses that can harm your device. You should only download apk files from trusted and reputable sources.</p>
8
- <h3>A crossword puzzle is a word game that involves filling in a grid with words that match the clues</h3>
9
- <p>A crossword puzzle is one of the most popular word games in the world. It consists of a grid of white and black squares, where some of the white squares form horizontal or vertical words. The words are determined by the clues given at the side or bottom of the grid. The clues are usually in the form of definitions, synonyms, antonyms, or wordplay. The goal of the game is to fill in all the white squares with letters that form valid words.</p>
10
- <h3>A crossword puzzle apk is an app that lets you play crossword puzzles on your phone or tablet</h3>
11
- <p>A crossword puzzle apk is an application that you can install on your Android device using an apk file. It allows you to play crossword puzzles on your phone or tablet anytime and anywhere. You can choose from hundreds of crossword puzzles in different categories and difficulties, or you can create your own puzzles using the app's editor. You can also customize the app's settings and themes to suit your preferences.</p>
12
- <h2>Why should you play crossword puzzle apk?</h2>
13
- <h3>Crossword puzzles are good for your brain health and cognitive skills</h3>
14
- <p>Playing crossword puzzles can benefit your brain in many ways. According to research, crossword puzzles can improve your memory, vocabulary, spelling, logic, reasoning, problem-solving, and general knowledge. They can also prevent cognitive decline and dementia by keeping your brain active and stimulated. Crossword puzzles can also reduce stress and improve your mood by providing a sense of accomplishment and satisfaction.</p>
15
- <h3>Crossword puzzles are fun and entertaining for all ages and levels</h3>
16
- <p>Playing crossword puzzles can also be a lot of fun and entertainment. You can challenge yourself with the check button on the top right corner of the screen. The app will highlight any incorrect or incomplete words in red. You can also reveal the solution by tapping on the reveal button on the top left corner of the screen. The app will show you the complete and correct grid. However, this will end your game and you will not get any points or achievements.</p>
17
- <h2>What are some features of crossword puzzle apk?</h2>
18
- <h3>Hundreds of crossword puzzles in different categories and difficulties</h3>
19
- <p>One of the main features of crossword puzzle apk is that it has hundreds of crossword puzzles in different categories and difficulties. You can choose from various themes and topics, such as sports, movies, history, science, and more. You can also select the level of difficulty that suits your skill and interest, such as easy, medium, hard, or expert. You will never run out of crossword puzzles to play with this app.</p>
20
- <h3>Customizable settings and themes to suit your preferences</h3>
21
- <p>Another feature of crossword puzzle apk is that it has customizable settings and themes to suit your preferences. You can change the font size, color, and style of the clues and words. You can also change the background color and image of the grid. You can choose from various themes, such as classic, modern, wood, paper, or dark. You can also adjust the sound effects and music volume of the app.</p>
22
- <h3>Offline mode and cloud sync to play anytime and anywhere</h3>
23
- <p>A third feature of crossword puzzle apk is that it has offline mode and cloud sync to play anytime and anywhere. You can play crossword puzzles without an internet connection by downloading them to your device. You can also sync your progress and achievements to the cloud by signing in with your Google account. This way, you can access your crossword puzzles on any device and resume your game from where you left off.</p>
24
- <p>cross word puzzle game apk download<br />
25
- cross word puzzle app for android free<br />
26
- best cross word puzzle apk offline<br />
27
- cross word puzzle solver apk mod<br />
28
- cross word puzzle maker apk pro<br />
29
- cross word puzzle apk with hints and clues<br />
30
- cross word puzzle generator apk online<br />
31
- cross word puzzle editor apk full version<br />
32
- cross word puzzle apk for kids and adults<br />
33
- cross word puzzle creator apk premium<br />
34
- cross word puzzle apk with daily challenges<br />
35
- cross word puzzle app for android tablet<br />
36
- cross word puzzle apk no ads and in-app purchases<br />
37
- cross word puzzle builder apk cracked<br />
38
- cross word puzzle apk with custom themes<br />
39
- cross word puzzle designer apk unlocked<br />
40
- cross word puzzle apk with voice input and output<br />
41
- cross word puzzle app for android tv<br />
42
- cross word puzzle apk no internet required<br />
43
- cross word puzzle filler apk latest version<br />
44
- cross word puzzle apk with different languages<br />
45
- cross word puzzle app for android wear<br />
46
- cross word puzzle apk with crossword dictionary<br />
47
- cross word puzzle formatter apk updated<br />
48
- cross word puzzle apk with multiple levels of difficulty<br />
49
- cross word puzzle app for android auto<br />
50
- cross word puzzle apk with synonyms and antonyms<br />
51
- cross word puzzle grader apk new<br />
52
- cross word puzzle apk with timer and score<br />
53
- cross word puzzle app for android box<br />
54
- cross word puzzle apk with anagrams and acronyms<br />
55
- cross word puzzle helper apk old version<br />
56
- cross word puzzle apk with images and emojis<br />
57
- cross word puzzle app for android phone<br />
58
- cross word puzzle apk with trivia and facts<br />
59
- cross word puzzle instructor apk beta version<br />
60
- cross word puzzle apk with categories and topics<br />
61
- cross word puzzle app for android emulator<br />
62
- cross word puzzle apk with hints and answers<br />
63
- cross word puzzle learner apk alpha version</p>
64
- <h3>Leaderboards and achievements to track your progress and compete with others</h3>
65
- <p>A fourth feature of crossword puzzle apk is that it has leaderboards and achievements to track your progress and compete with others. You can earn points and stars for completing crossword puzzles and unlocking new levels. You can also earn achievements for reaching certain milestones or completing certain challenges. You can view your rank and score on the global or local leaderboards, and compare them with other players around the world or in your area.</p>
66
- <h2>Conclusion</h2>
67
- <p>Crossword puzzle apk is a fun and challenging game for your brain. It lets you play crossword puzzles on your Android device using an apk file. It has many benefits for your brain health and cognitive skills, as well as for your fun and entertainment. It also has many features that make it convenient and accessible, such as hundreds of crossword puzzles in different categories and difficulties, customizable settings and themes, offline mode and cloud sync, and leaderboards and achievements. If you love word games and puzzles, you should definitely try playing crossword puzzle apk. You will not regret it!</p>
68
- <h2>FAQs</h2>
69
- <h3>What is the best source to download crossword puzzle apk?</h3>
70
- <p>There are many sources to download crossword puzzle apk, but not all of them are safe and reliable. You should only download apk files from trusted and reputable sources, such as official websites, app stores, or file-sharing platforms. You should also check the reviews and ratings of the apk files before downloading them.</p>
71
- <h3>How can I create my own crossword puzzle using the app?</h3>
72
- <p>You can create your own crossword puzzle using the app's editor. You can choose the size of the grid, the theme of the puzzle, and the clues and words that you want to use. You can also save your puzzle and share it with others. To access the editor, you need to tap on the menu button on the top left corner of the screen, and then tap on "Create Puzzle".</p>
73
- <h3>How can I change the theme of the app?</h3>
74
- <p>You can change the theme of the app by tapping on the settings button on the top right corner of the screen, and then tapping on "Theme". You can choose from various themes, such as classic, modern, wood, paper, or dark. You can also change the background color and image of the grid.</p>
75
- <h3>How can I play offline or sync my progress to the cloud?</h3>
76
- <p>You can play offline by downloading the puzzles to your device. You can also sync your progress and achievements to the cloud by signing in with your Google account. To do either of these, you need to tap on the menu button on the top left corner of the screen, and then tap on "Offline Mode" or "Cloud Sync".</p>
77
- <h3>How can I compete with other players online?</h3>
78
- <p>You can compete with other players online by viewing your rank and score on the global or local leaderboards. You can also earn achievements for reaching certain milestones or completing certain challenges. To access these features, you need to tap on the menu button on the top left corner of the screen, and then tap on "Leaderboards" or "Achievements".</p> 401be4b1e0<br />
79
- <br />
80
- <br />
spaces/1toTree/lora_test/ppdiffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py DELETED
@@ -1,346 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 Microsoft and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from typing import Callable, List, Optional, Tuple, Union
17
-
18
- import paddle
19
- import paddle.nn as nn
20
-
21
- from paddlenlp.transformers import CLIPTextModel, CLIPTokenizer
22
-
23
- from ...configuration_utils import ConfigMixin, register_to_config
24
- from ...modeling_utils import ModelMixin
25
- from ...models import Transformer2DModel, VQModel
26
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
27
- from ...schedulers import VQDiffusionScheduler
28
- from ...utils import logging
29
-
30
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
-
32
- INF = 1e9
33
-
34
-
35
- # paddle logsumexp may has bug
36
- def logsumexp(x, axis=None, keepdim=False):
37
- return paddle.log(x.exp().sum(axis=axis, keepdim=keepdim))
38
-
39
-
40
- class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
41
- """
42
- Utility class for storing learned text embeddings for classifier free sampling
43
- """
44
-
45
- @register_to_config
46
- def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
47
- super().__init__()
48
-
49
- self.learnable = learnable
50
-
51
- if self.learnable:
52
- assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
53
- assert length is not None, "learnable=True requires `length` to be set"
54
-
55
- embeddings = paddle.zeros([length, hidden_size])
56
- self.embeddings = self.create_parameter(
57
- embeddings.shape, default_initializer=nn.initializer.Assign(embeddings)
58
- )
59
- else:
60
- self.embeddings = None
61
-
62
-
63
- class VQDiffusionPipeline(DiffusionPipeline):
64
- r"""
65
- Pipeline for text-to-image generation using VQ Diffusion
66
-
67
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
68
- library implements for all the pipelines (such as downloading or saving, running on a particular xxxx, etc.)
69
-
70
- Args:
71
- vqvae ([`VQModel`]):
72
- Vector Quantized Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent
73
- representations.
74
- text_encoder ([`CLIPTextModel`]):
75
- Frozen text-encoder. VQ Diffusion uses the text portion of
76
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
77
- the [clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) variant.
78
- tokenizer (`CLIPTokenizer`):
79
- Tokenizer of class
80
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
81
- transformer ([`Transformer2DModel`]):
82
- Conditional transformer to denoise the encoded image latents.
83
- scheduler ([`VQDiffusionScheduler`]):
84
- A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
85
- """
86
-
87
- vqvae: VQModel
88
- text_encoder: CLIPTextModel
89
- tokenizer: CLIPTokenizer
90
- transformer: Transformer2DModel
91
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
92
- scheduler: VQDiffusionScheduler
93
-
94
- def __init__(
95
- self,
96
- vqvae: VQModel,
97
- text_encoder: CLIPTextModel,
98
- tokenizer: CLIPTokenizer,
99
- transformer: Transformer2DModel,
100
- scheduler: VQDiffusionScheduler,
101
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
102
- ):
103
- super().__init__()
104
-
105
- self.register_modules(
106
- vqvae=vqvae,
107
- transformer=transformer,
108
- text_encoder=text_encoder,
109
- tokenizer=tokenizer,
110
- scheduler=scheduler,
111
- learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
112
- )
113
-
114
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
115
- batch_size = len(prompt) if isinstance(prompt, list) else 1
116
-
117
- # get prompt text embeddings
118
- text_inputs = self.tokenizer(
119
- prompt,
120
- padding="max_length",
121
- max_length=self.tokenizer.model_max_length,
122
- return_tensors="pd",
123
- )
124
- text_input_ids = text_inputs.input_ids
125
-
126
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
127
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
128
- logger.warning(
129
- "The following part of your input was truncated because CLIP can only handle sequences up to"
130
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
131
- )
132
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
133
- text_embeddings = self.text_encoder(text_input_ids)[0]
134
-
135
- # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
136
- # While CLIP does normalize the pooled output of the text transformer when combining
137
- # the image and text embeddings, CLIP does not directly normalize the last hidden state.
138
- #
139
- # CLIP normalizing the pooled output.
140
- # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
141
- text_embeddings = text_embeddings / text_embeddings.norm(axis=-1, keepdim=True)
142
-
143
- # duplicate text embeddings for each generation per prompt, using mps friendly method
144
- bs_embed, seq_len, _ = text_embeddings.shape
145
- text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
146
- text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
147
-
148
- if do_classifier_free_guidance:
149
- if self.learned_classifier_free_sampling_embeddings.learnable:
150
- uncond_embeddings = self.learned_classifier_free_sampling_embeddings.embeddings
151
- uncond_embeddings = uncond_embeddings.unsqueeze(0).tile([batch_size, 1, 1])
152
- else:
153
- uncond_tokens = [""] * batch_size
154
-
155
- max_length = text_input_ids.shape[-1]
156
- uncond_input = self.tokenizer(
157
- uncond_tokens,
158
- padding="max_length",
159
- max_length=max_length,
160
- truncation=True,
161
- return_tensors="pd",
162
- )
163
- uncond_embeddings = self.text_encoder(uncond_input.input_ids)[0]
164
- # See comment for normalizing text embeddings
165
- uncond_embeddings = uncond_embeddings / uncond_embeddings.norm(axis=-1, keepdim=True)
166
-
167
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
168
- seq_len = uncond_embeddings.shape[1]
169
- uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
170
- uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
171
-
172
- # For classifier free guidance, we need to do two forward passes.
173
- # Here we concatenate the unconditional and text embeddings into a single batch
174
- # to avoid doing two forward passes
175
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
176
-
177
- return text_embeddings
178
-
179
- @paddle.no_grad()
180
- def __call__(
181
- self,
182
- prompt: Union[str, List[str]],
183
- num_inference_steps: int = 100,
184
- guidance_scale: float = 5.0,
185
- truncation_rate: float = 1.0,
186
- num_images_per_prompt: int = 1,
187
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
188
- latents: Optional[paddle.Tensor] = None,
189
- output_type: Optional[str] = "pil",
190
- return_dict: bool = True,
191
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
192
- callback_steps: Optional[int] = 1,
193
- ) -> Union[ImagePipelineOutput, Tuple]:
194
- """
195
- Function invoked when calling the pipeline for generation.
196
-
197
- Args:
198
- prompt (`str` or `List[str]`):
199
- The prompt or prompts to guide the image generation.
200
- num_inference_steps (`int`, *optional*, defaults to 100):
201
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
202
- expense of slower inference.
203
- guidance_scale (`float`, *optional*, defaults to 7.5):
204
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
205
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
206
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
207
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
208
- usually at the expense of lower image quality.
209
- truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)):
210
- Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at
211
- most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above
212
- `truncation_rate` are set to zero.
213
- num_images_per_prompt (`int`, *optional*, defaults to 1):
214
- The number of images to generate per prompt.
215
- generator (`paddle.Generator`, *optional*):
216
- One or a list of paddle generator(s) to make generation deterministic.
217
- latents (`paddle.Tensor` of shape (batch), *optional*):
218
- Pre-generated noisy latents to be used as inputs for image generation. Must be valid embedding indices.
219
- Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will
220
- be generated of completely masked latent pixels.
221
- output_type (`str`, *optional*, defaults to `"pil"`):
222
- The output format of the generated image. Choose between
223
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
224
- return_dict (`bool`, *optional*, defaults to `True`):
225
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
226
- callback (`Callable`, *optional*):
227
- A function that will be called every `callback_steps` steps during inference. The function will be
228
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
229
- callback_steps (`int`, *optional*, defaults to 1):
230
- The frequency at which the `callback` function will be called. If not specified, the callback will be
231
- called at every step.
232
-
233
- Returns:
234
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput `] if
235
- `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
236
- generated images.
237
- """
238
- if isinstance(prompt, str):
239
- batch_size = 1
240
- elif isinstance(prompt, list):
241
- batch_size = len(prompt)
242
- else:
243
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
244
-
245
- batch_size = batch_size * num_images_per_prompt
246
-
247
- do_classifier_free_guidance = guidance_scale > 1.0
248
-
249
- text_embeddings = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
250
-
251
- if (callback_steps is None) or (
252
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
253
- ):
254
- raise ValueError(
255
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
256
- f" {type(callback_steps)}."
257
- )
258
-
259
- # get the initial completely masked latents unless the user supplied it
260
-
261
- latents_shape = [batch_size, self.transformer.num_latent_pixels]
262
- if latents is None:
263
- mask_class = self.transformer.num_vector_embeds - 1
264
- latents = paddle.full(latents_shape, mask_class, dtype="int64")
265
- else:
266
- if latents.shape != latents_shape:
267
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
268
- if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
269
- raise ValueError(
270
- "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
271
- f" {self.transformer.num_vector_embeds - 1} (inclusive)."
272
- )
273
-
274
- # set timesteps
275
- self.scheduler.set_timesteps(num_inference_steps)
276
-
277
- timesteps_tensor = self.scheduler.timesteps
278
-
279
- sample = latents
280
-
281
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
282
- # expand the sample if we are doing classifier free guidance
283
- latent_model_input = paddle.concat([sample] * 2) if do_classifier_free_guidance else sample
284
-
285
- # predict the un-noised image
286
- # model_output == `log_p_x_0`
287
- model_output = self.transformer(
288
- latent_model_input, encoder_hidden_states=text_embeddings, timestep=t
289
- ).sample
290
-
291
- if do_classifier_free_guidance:
292
- model_output_uncond, model_output_text = model_output.chunk(2)
293
- model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
294
- model_output -= logsumexp(model_output, axis=1, keepdim=True)
295
-
296
- model_output = self.truncate(model_output, truncation_rate)
297
-
298
- # remove `log(0)`'s (`-inf`s)
299
- model_output = model_output.clip(-70)
300
-
301
- # compute the previous noisy sample x_t -> x_t-1
302
- sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
303
-
304
- # call the callback, if provided
305
- if callback is not None and i % callback_steps == 0:
306
- callback(i, t, sample)
307
-
308
- embedding_channels = self.vqvae.config.vq_embed_dim
309
- embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
310
- embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
311
- image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
312
-
313
- image = (image / 2 + 0.5).clip(0, 1)
314
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
315
-
316
- if output_type == "pil":
317
- image = self.numpy_to_pil(image)
318
-
319
- if not return_dict:
320
- return (image,)
321
-
322
- return ImagePipelineOutput(images=image)
323
-
324
- def truncate(self, log_p_x_0: paddle.Tensor, truncation_rate: float) -> paddle.Tensor:
325
- """
326
- Truncates log_p_x_0 such that for each column vector, the total cumulative probability is `truncation_rate` The
327
- lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero.
328
- """
329
- sorted_log_p_x_0, indices = paddle.topk(log_p_x_0, k=log_p_x_0.shape[1], axis=1)
330
- sorted_p_x_0 = paddle.exp(sorted_log_p_x_0)
331
- keep_mask = (sorted_p_x_0.cumsum(axis=1) < truncation_rate).cast("int64")
332
-
333
- # Ensure that at least the largest probability is not zeroed out
334
- all_true = paddle.full_like(keep_mask[:, 0:1, :], 1)
335
- keep_mask = paddle.concat((all_true, keep_mask), axis=1)
336
- keep_mask = keep_mask[:, :-1, :]
337
-
338
- keep_mask = paddle.take_along_axis(keep_mask, indices.argsort(1), axis=1).cast(
339
- "bool"
340
- ) # keep_mask.gather(indices.argsort(1), axis=1)
341
-
342
- rv = log_p_x_0.clone()
343
- # rv[~keep_mask] = -INF # -inf = log(0)
344
- rv = paddle.where(keep_mask, rv, paddle.to_tensor(-INF, dtype="float32"))
345
-
346
- return rv
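
For reference, a minimal sketch of how a pipeline like the one removed above is typically driven. The checkpoint id and the top-level `ppdiffusers` export are assumptions rather than something taken from this repository; the call mirrors the `__call__` signature documented in the docstring (`guidance_scale > 1` enables classifier-free guidance, `truncation_rate` trims unlikely codebook entries at each step).

```python
# Hypothetical usage sketch; checkpoint name and import path are assumptions.
import paddle
from ppdiffusers import VQDiffusionPipeline  # assumes the top-level export exists

paddle.seed(0)  # make sampling reproducible

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
result = pipe(
    prompt="a teddy bear playing in the pool",
    num_inference_steps=100,
    guidance_scale=5.0,    # > 1.0 turns on classifier-free guidance
    truncation_rate=0.86,  # keep only the most probable codebook entries per step
)
result.images[0].save("teddy_bear.png")
```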
spaces/44ov41za8i/FreeVC/app.py DELETED
@@ -1,103 +0,0 @@
1
- import os
2
- import torch
3
- import librosa
4
- import gradio as gr
5
- from scipy.io.wavfile import write
6
- from transformers import WavLMModel
7
-
8
- import utils
9
- from models import SynthesizerTrn
10
- from mel_processing import mel_spectrogram_torch
11
- from speaker_encoder.voice_encoder import SpeakerEncoder
12
-
13
- '''
14
- def get_wavlm():
15
- os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')
16
- shutil.move('WavLM-Large.pt', 'wavlm')
17
- '''
18
-
19
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
20
-
21
- print("Loading FreeVC...")
22
- hps = utils.get_hparams_from_file("configs/freevc.json")
23
- freevc = SynthesizerTrn(
24
- hps.data.filter_length // 2 + 1,
25
- hps.train.segment_size // hps.data.hop_length,
26
- **hps.model).to(device)
27
- _ = freevc.eval()
28
- _ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
29
- smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')
30
-
31
- print("Loading FreeVC(24k)...")
32
- hps = utils.get_hparams_from_file("configs/freevc-24.json")
33
- freevc_24 = SynthesizerTrn(
34
- hps.data.filter_length // 2 + 1,
35
- hps.train.segment_size // hps.data.hop_length,
36
- **hps.model).to(device)
37
- _ = freevc_24.eval()
38
- _ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)
39
-
40
- print("Loading FreeVC-s...")
41
- hps = utils.get_hparams_from_file("configs/freevc-s.json")
42
- freevc_s = SynthesizerTrn(
43
- hps.data.filter_length // 2 + 1,
44
- hps.train.segment_size // hps.data.hop_length,
45
- **hps.model).to(device)
46
- _ = freevc_s.eval()
47
- _ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)
48
-
49
- print("Loading WavLM for content...")
50
- cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
51
-
52
- def convert(model, src, tgt):
53
- with torch.no_grad():
54
- # tgt
55
- wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
56
- wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
57
- if model == "FreeVC" or model == "FreeVC (24kHz)":
58
- g_tgt = smodel.embed_utterance(wav_tgt)
59
- g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
60
- else:
61
- wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
62
- mel_tgt = mel_spectrogram_torch(
63
- wav_tgt,
64
- hps.data.filter_length,
65
- hps.data.n_mel_channels,
66
- hps.data.sampling_rate,
67
- hps.data.hop_length,
68
- hps.data.win_length,
69
- hps.data.mel_fmin,
70
- hps.data.mel_fmax
71
- )
72
- # src
73
- wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
74
- wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
75
- c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
76
- # infer
77
- if model == "FreeVC":
78
- audio = freevc.infer(c, g=g_tgt)
79
- elif model == "FreeVC-s":
80
- audio = freevc_s.infer(c, mel=mel_tgt)
81
- else:
82
- audio = freevc_24.infer(c, g=g_tgt)
83
- audio = audio[0][0].data.cpu().float().numpy()
84
- if model == "FreeVC" or model == "FreeVC-s":
85
- write("out.wav", hps.data.sampling_rate, audio)
86
- else:
87
- write("out.wav", 24000, audio)
88
- out = "out.wav"
89
- return out
90
-
91
- model = gr.Dropdown(choices=["FreeVC", "FreeVC-s", "FreeVC (24kHz)"], value="FreeVC",type="value", label="Model")
92
- audio1 = gr.inputs.Audio(label="Source Audio", type='filepath')
93
- audio2 = gr.inputs.Audio(label="Reference Audio", type='filepath')
94
- inputs = [model, audio1, audio2]
95
- outputs = gr.outputs.Audio(label="Output Audio", type='filepath')
96
-
97
- title = "FreeVC"
98
- description = "Gradio Demo for FreeVC: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit. In addition, speaker similarity can be largely affected if there are too much silence in the reference audio, so please <strong>trim</strong> it before submitting."
99
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.15418' target='_blank'>Paper</a> | <a href='https://github.com/OlaWod/FreeVC' target='_blank'>Github Repo</a></p>"
100
-
101
- examples=[["FreeVC", 'p225_001.wav', 'p226_002.wav'], ["FreeVC-s", 'p226_002.wav', 'p225_001.wav'], ["FreeVC (24kHz)", 'p225_001.wav', 'p226_002.wav']]
102
-
103
- gr.Interface(convert, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
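
The demo above uses the legacy `gr.inputs` / `gr.outputs` modules and the `enable_queue` flag, which were dropped in later Gradio releases. A rough equivalent with the current component API might look like the sketch below; it reuses `convert`, `title`, `description`, and `article` from the script above and assumes Gradio 3 or newer.

```python
import gradio as gr

demo = gr.Interface(
    fn=convert,  # conversion function defined in the deleted app above
    inputs=[
        gr.Dropdown(choices=["FreeVC", "FreeVC-s", "FreeVC (24kHz)"],
                    value="FreeVC", label="Model"),
        gr.Audio(label="Source Audio", type="filepath"),
        gr.Audio(label="Reference Audio", type="filepath"),
    ],
    outputs=gr.Audio(label="Output Audio", type="filepath"),
    title=title,
    description=description,
    article=article,
)
demo.queue().launch()  # queue() replaces the removed enable_queue=True flag
```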
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/model_param_init.py DELETED
@@ -1,69 +0,0 @@
1
- import json
2
- import os
3
- import pathlib
4
-
5
- default_param = {}
6
- default_param["bins"] = 768
7
- default_param["unstable_bins"] = 9 # training only
8
- default_param["reduction_bins"] = 762 # training only
9
- default_param["sr"] = 44100
10
- default_param["pre_filter_start"] = 757
11
- default_param["pre_filter_stop"] = 768
12
- default_param["band"] = {}
13
-
14
-
15
- default_param["band"][1] = {
16
- "sr": 11025,
17
- "hl": 128,
18
- "n_fft": 960,
19
- "crop_start": 0,
20
- "crop_stop": 245,
21
- "lpf_start": 61, # inference only
22
- "res_type": "polyphase",
23
- }
24
-
25
- default_param["band"][2] = {
26
- "sr": 44100,
27
- "hl": 512,
28
- "n_fft": 1536,
29
- "crop_start": 24,
30
- "crop_stop": 547,
31
- "hpf_start": 81, # inference only
32
- "res_type": "sinc_best",
33
- }
34
-
35
-
36
- def int_keys(d):
37
- r = {}
38
- for k, v in d:
39
- if k.isdigit():
40
- k = int(k)
41
- r[k] = v
42
- return r
43
-
44
-
45
- class ModelParameters(object):
46
- def __init__(self, config_path=""):
47
- if ".pth" == pathlib.Path(config_path).suffix:
48
- import zipfile
49
-
50
- with zipfile.ZipFile(config_path, "r") as zip:
51
- self.param = json.loads(
52
- zip.read("param.json"), object_pairs_hook=int_keys
53
- )
54
- elif ".json" == pathlib.Path(config_path).suffix:
55
- with open(config_path, "r") as f:
56
- self.param = json.loads(f.read(), object_pairs_hook=int_keys)
57
- else:
58
- self.param = default_param
59
-
60
- for k in [
61
- "mid_side",
62
- "mid_side_b",
63
- "mid_side_b2",
64
- "stereo_w",
65
- "stereo_n",
66
- "reverse",
67
- ]:
68
- if not k in self.param:
69
- self.param[k] = False
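
The `int_keys` helper above relies on `json.loads(..., object_pairs_hook=...)`, which hands every decoded JSON object to the hook as a list of `(key, value)` pairs; digit-string keys such as the band indices then come back as integers, matching `default_param["band"][1]`. A small standalone illustration:

```python
import json

def int_keys(pairs):
    # object_pairs_hook passes a list of (key, value) tuples for each JSON object
    result = {}
    for key, value in pairs:
        if key.isdigit():
            key = int(key)
        result[key] = value
    return result

raw = '{"bins": 768, "band": {"1": {"sr": 11025}, "2": {"sr": 44100}}}'
param = json.loads(raw, object_pairs_hook=int_keys)
print(param["band"][1]["sr"])  # 11025 -- the band key is the integer 1, not "1"
```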
spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/app.py DELETED
@@ -1,267 +0,0 @@
1
- import streamlit as st
2
- import streamlit.components.v1 as components
3
- import networkx as nx
4
- import matplotlib.pyplot as plt
5
- from pyvis.network import Network
6
- import got
7
- import numpy as np
8
- import pandas as pd
9
- import time
10
- import re
11
- import graphviz as graphviz
12
- import pydeck as pdk
13
-
14
- from st_click_detector import click_detector
15
-
16
- st.graphviz_chart('''
17
- digraph {
18
- Income -> AbleToBuyOnlyNecessities
19
- Income -> DifficultyBuyingNecessities
20
- Income -> DifficultyWithMoneyManagement
21
- Income -> LowNoIncome
22
- Income -> UninsuredMedicalExpenses
23
- }
24
- ''')
25
-
26
- st.graphviz_chart('''
27
- digraph {
28
- Income -> Continuityof -> Care
29
- Income -> Durable -> Medical -> Equipment
30
- Income -> Finances
31
- Income -> LegalSystem
32
- Income -> Medical -> Dental -> Care
33
- Income -> Medication -> Coordination -> Ordering
34
- Income -> Other -> Community -> Resources
35
- Income -> SocialWork -> Counseling -> Care
36
- Income -> Supplies
37
- }
38
- ''')
39
-
40
- st.graphviz_chart('''
41
- digraph {
42
- MentalHealth -> Apprehension -> Undefined -> Fear -> Anxious
43
- MentalHealth -> Attempts -> Suicide -> Homicide
44
- MentalHealth -> Difficulty -> Managing -> Anger
45
- MentalHealth -> Difficulty -> Managing -> Stress
46
- MentalHealth -> Expresses -> Suicidal -> Homicidal -> Thoughts
47
- MentalHealth -> False -> Beliefs -> Delusions
48
- MentalHealth -> False -> Perceptions -> Hallucinations -> Illusions
49
- MentalHealth -> FlatAffect -> LackofEmotion
50
- MentalHealth -> Irritable -> Agitated -> Aggressive
51
- MentalHealth -> LossofInterest -> Involvementin -> ActivitiesSelfCare
52
- MentalHealth -> MoodSwings
53
- MentalHealth -> Narrowedto -> Scattered -> Attention -> Focus
54
- MentalHealth -> Purposeless -> Compulsive -> RepetitiveActivity
55
- MentalHealth -> Sadness -> Hopelessness -> Decreased -> SelfEsteem
56
- MentalHealth -> Somatic -> Complaints -> Fatigue
57
- }
58
- ''')
59
-
60
- st.graphviz_chart('''
61
- digraph {
62
- MentalHealth -> Anger -> Management
63
- MentalHealth -> Behavioral -> Health -> Care
64
- MentalHealth -> Communication
65
- MentalHealth -> Continuityof -> Care
66
- MentalHealth -> Coping -> Skills
67
- MentalHealth -> Dietary -> Management
68
- MentalHealth -> Discipline
69
- MentalHealth -> EndofLife -> Care
70
- MentalHealth -> Interaction
71
- MentalHealth -> LegalSystem
72
- MentalHealth -> Medical -> Dental -> Care
73
- MentalHealth -> Medication -> ActionSideEffects
74
- MentalHealth -> Medication -> Administration
75
- MentalHealth -> Medication -> CoordinationOrdering
76
- MentalHealth -> Nursing -> Care
77
- MentalHealth -> Nutritionist -> Care
78
- MentalHealth -> Other -> Community -> Resources
79
- MentalHealth -> Relaxation -> Breathing -> Techniques
80
- MentalHealth -> Rest -> Sleep
81
- MentalHealth -> Safety
82
- MentalHealth -> Screening -> Procedures
83
- MentalHealth -> SignsSymptoms -> MentalEmotional
84
- MentalHealth -> SignsSymptoms -> Physical
85
- MentalHealth -> SocialWork -> Counseling -> Care
86
- MentalHealth -> Stress -> Management
87
- MentalHealth -> Support -> Group
88
- MentalHealth -> Support -> System
89
- MentalHealth -> Wellness
90
- }
91
- ''')
92
-
93
-
94
- st.graphviz_chart('''
95
- digraph {
96
- Respiration -> Abnormal -> BreathSoundsCrackles
97
- Respiration -> Abnormal -> IrregularBreathPatterns
98
- Respiration -> Abnormal -> RespiratoryLaboratoryResults
99
- Respiration -> Abnormal -> Sputum
100
- Respiration -> Cough
101
- Respiration -> Noisy -> RespirationswheezingRalesRhonchi
102
- Respiration -> Rhinorrhea -> NasalCongestion
103
- Respiration -> UnabletoBreathe -> Independently
104
- }
105
- ''')
106
-
107
- st.graphviz_chart('''
108
- digraph {
109
- Respiration -> Anatomy -> Physiology
110
- Respiration -> Continuityof -> Care
111
- Respiration -> Coping -> Skills
112
- Respiration -> Dietary -> Management
113
- Respiration -> Durable -> Medical -> Equipment
114
- Respiration -> Education
115
- Respiration -> EndofLife -> Care
116
- Respiration -> Environment
117
- Respiration -> Exercises
118
- Respiration -> Infection -> Precautions
119
- Respiration -> Laboratory -> Findings
120
- Respiration -> Medical -> Dental -> Care
121
- Respiration -> Medication -> Action -> SideEffects
122
- Respiration -> Medication -> Administration
123
- Respiration -> Medication -> Prescription
124
- Respiration -> Medication -> SetUp
125
- Respiration -> Mobility -> Transfers
126
- Respiration -> Nursing -> Care
127
- Respiration -> Positioning
128
- Respiration -> Relaxation -> Breathing -> Techniques
129
- Respiration -> Respiratory -> Care
130
- Respiration -> Respiratory -> Therapy -> Care
131
- Respiration -> Safety
132
- Respiration -> Screening -> Procedures
133
- Respiration -> SignsSymptoms -> MentalEmotional
134
- Respiration -> SignsSymptoms -> Physical
135
- Respiration -> Specimen -> Collection
136
- Respiration -> Supplies
137
- Respiration -> Support -> Group
138
- Respiration -> Support -> System
139
- Respiration -> Wellness
140
- }
141
- ''')
142
-
143
-
144
- st.graphviz_chart('''
145
- digraph {
146
- Circulation -> Abnormal -> BloodPressureReading
147
- Circulation -> Abnormal -> CardiacLaboratoryResults
148
- Circulation -> Abnormal -> Clotting
149
- Circulation -> Abnormal -> HeartSoundsMurmurs
150
- Circulation -> Anginal -> Pain
151
- Circulation -> Cramping -> Pain -> ofExtremities
152
- Circulation -> Decreased -> Pulses
153
- Circulation -> Discoloration -> ofSkinCyanosis
154
- Circulation -> EdemaSwelling -> inlegsarmsfeet
155
- Circulation -> ExcessivelyRapid -> HeartRate
156
- Circulation -> IrregularHeartRate
157
- Circulation -> SyncopalEpisodes -> Fainting -> Dizziness
158
- Circulation -> TemperatureChange -> inAffectedArea
159
- Circulation -> Varicosities
160
- }
161
- ''')
162
-
163
- st.graphviz_chart('''
164
- digraph {
165
- Circulation -> Anatomy -> Physiology
166
- Circulation -> Cardiac -> Care
167
- Circulation -> Continuityof -> Care
168
- Circulation -> Coping -> Skills
169
- Circulation -> Dietary -> Management
170
- Circulation -> Durable -> Medical -> Equipment
171
- Circulation -> Exercises
172
- Circulation -> Finances
173
- Circulation -> Infection -> Precautions
174
- Circulation -> Laboratory -> Findings
175
- Circulation -> Medical -> Dental -> Care
176
- Circulation -> Medication -> Action -> SideEffects
177
- Circulation -> Medication -> Administration
178
- Circulation -> Medication -> SetUp
179
- Circulation -> Mobility -> Transfers
180
- Circulation -> Nursing -> Care
181
- Circulation -> Personal -> Hygiene
182
- Circulation -> Relaxation -> Breathing -> Techniques
183
- Circulation -> Safety
184
- Circulation -> Screening -> Procedures
185
- Circulation -> SignsSymptoms -> MentalEmotional
186
- Circulation -> SignsSymptoms -> Physical
187
- Circulation -> Support -> Group
188
- Circulation -> Support -> System
189
- Circulation -> Wellness
190
- }
191
- ''')
192
-
193
- df = pd.read_csv("testfile.csv")
194
- @st.cache
195
- def convert_df(df):
196
- return df.to_csv().encode('utf-8')
197
- csv = convert_df(df)
198
- st.download_button(
199
- "Press to Download",
200
- csv,
201
- "testfile.csv",
202
- "text/csv",
203
- key='download-csv'
204
- )
205
-
206
-
207
- st.title('Streamlit Visualization')
208
- dataframe = pd.DataFrame(np.random.randn(10, 20),
209
- columns = ('col %d' % i
210
- for i in range(20)))
211
- st.write(dataframe)
212
-
213
- dataframe = pd.DataFrame(np.random.randn(10, 5),
214
- columns = ('col %d' % i
215
- for i in range(5)))
216
- dataframe
217
- st.write('This is a line_chart.')
218
- st.line_chart(dataframe)
219
-
220
- st.write('This is a area_chart.')
221
- st.area_chart(dataframe)
222
-
223
- st.write('This is a bar_chart.')
224
- st.bar_chart(dataframe)
225
-
226
- st.write('Map data')
227
- data_of_map = pd.DataFrame(
228
- np.random.randn(1000, 2) / [60, 60] + [36.66, -121.6],
229
- columns = ['latitude', 'longitude'])
230
- st.map(data_of_map)
231
-
232
-
233
- st.title('Pyvis VisJS DOTlang Legend')
234
-
235
- Network(notebook=True)
236
- # make Network show itself with repr_html
237
-
238
- def net_repr_html(self):
239
- nodes, edges, height, width, options = self.get_network_data()
240
- html = self.template.render(height=height, width=width, nodes=nodes, edges=edges, options=options)
241
- return html
242
-
243
- Network._repr_html_ = net_repr_html
244
-
245
- st.sidebar.title('Choose your favorite Graph')
246
- option=st.sidebar.selectbox('select graph',('Simple','Karate', 'GOT'))
247
- physics=st.sidebar.checkbox('add physics interactivity?')
248
- got.simple_func(physics)
249
-
250
- if option=='Simple':
251
- HtmlFile = open("test.html", 'r', encoding='utf-8')
252
- source_code = HtmlFile.read()
253
- components.html(source_code, height = 900,width=900)
254
-
255
- got.got_func(physics)
256
-
257
- if option=='GOT':
258
- HtmlFile = open("gameofthrones.html", 'r', encoding='utf-8')
259
- source_code = HtmlFile.read()
260
- components.html(source_code, height = 1200,width=1000)
261
-
262
- got.karate_func(physics)
263
-
264
- if option=='Karate':
265
- HtmlFile = open("karate.html", 'r', encoding='utf-8')
266
- source_code = HtmlFile.read()
267
- components.html(source_code, height = 1200,width=1000)
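
The PyVis sections above depend on helper functions in `got.py` that write standalone HTML files (`test.html`, `gameofthrones.html`, `karate.html`), which are then embedded with `components.html`. A minimal sketch of that pattern, with an illustrative file name:

```python
import streamlit.components.v1 as components
from pyvis.network import Network

# Build a tiny interactive graph and write it out as a standalone HTML page.
net = Network(height="600px", width="100%", notebook=False)
net.add_node("Income", label="Income")
net.add_node("Finances", label="Finances")
net.add_edge("Income", "Finances")
net.save_graph("example_graph.html")  # illustrative file name

# Embed the generated page inside the Streamlit app.
with open("example_graph.html", "r", encoding="utf-8") as f:
    components.html(f.read(), height=650)
```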
spaces/AIARTCHAN/openpose_editor/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
spaces/AIConsultant/MusicGen/audiocraft/metrics/kld.py DELETED
@@ -1,218 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import contextlib
8
- from functools import partial
9
- import logging
10
- import os
11
- import typing as tp
12
-
13
- import torch
14
- import torchmetrics
15
-
16
- from ..data.audio_utils import convert_audio
17
-
18
-
19
- logger = logging.getLogger(__name__)
20
-
21
-
22
- class _patch_passt_stft:
23
- """Decorator to patch torch.stft in PaSST."""
24
- def __init__(self):
25
- self.old_stft = torch.stft
26
-
27
- def __enter__(self):
28
- # return_complex is a mandatory parameter in latest torch versions
29
- # torch is throwing RuntimeErrors when not set
30
- torch.stft = partial(torch.stft, return_complex=False)
31
-
32
- def __exit__(self, *exc):
33
- torch.stft = self.old_stft
34
-
35
-
36
- def kl_divergence(pred_probs: torch.Tensor, target_probs: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
37
- """Computes the elementwise KL-Divergence loss between probability distributions
38
- from generated samples and target samples.
39
-
40
- Args:
41
- pred_probs (torch.Tensor): Probabilities for each label obtained
42
- from a classifier on generated audio. Expected shape is [B, num_classes].
43
- target_probs (torch.Tensor): Probabilities for each label obtained
44
- from a classifier on target audio. Expected shape is [B, num_classes].
45
- epsilon (float): Epsilon value.
46
- Returns:
47
- kld (torch.Tensor): KLD loss between each generated sample and target pair.
48
- """
49
- kl_div = torch.nn.functional.kl_div((pred_probs + epsilon).log(), target_probs, reduction="none")
50
- return kl_div.sum(-1)
51
-
52
-
53
- class KLDivergenceMetric(torchmetrics.Metric):
54
- """Base implementation for KL Divergence metric.
55
-
56
- The KL divergence is measured between probability distributions
57
- of class predictions returned by a pre-trained audio classification model.
58
- When the KL-divergence is low, the generated audio is expected to
59
- have similar acoustic characteristics as the reference audio,
60
- according to the classifier.
61
- """
62
- def __init__(self):
63
- super().__init__()
64
- self.add_state("kld_pq_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
65
- self.add_state("kld_qp_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
66
- self.add_state("kld_all_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
67
- self.add_state("weight", default=torch.tensor(0), dist_reduce_fx="sum")
68
-
69
- def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor,
70
- sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]:
71
- """Get model output given provided input tensor.
72
-
73
- Args:
74
- x (torch.Tensor): Input audio tensor of shape [B, C, T].
75
- sizes (torch.Tensor): Actual audio sample length, of shape [B].
76
- sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
77
- Returns:
78
- probs (torch.Tensor): Probabilities over labels, of shape [B, num_classes].
79
- """
80
- raise NotImplementedError("implement method to extract label distributions from the model.")
81
-
82
- def update(self, preds: torch.Tensor, targets: torch.Tensor,
83
- sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
84
- """Calculates running KL-Divergence loss between batches of audio
85
- preds (generated) and target (ground-truth)
86
- Args:
87
- preds (torch.Tensor): Audio samples to evaluate, of shape [B, C, T].
88
- targets (torch.Tensor): Target samples to compare against, of shape [B, C, T].
89
- sizes (torch.Tensor): Actual audio sample length, of shape [B].
90
- sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
91
- """
92
- assert preds.shape == targets.shape
93
- assert preds.size(0) > 0, "Cannot update the loss with empty tensors"
94
- preds_probs = self._get_label_distribution(preds, sizes, sample_rates)
95
-         targets_probs = self._get_label_distribution(targets, sizes, sample_rates)
-         if preds_probs is not None and targets_probs is not None:
-             assert preds_probs.shape == targets_probs.shape
-             kld_scores = kl_divergence(preds_probs, targets_probs)
-             assert not torch.isnan(kld_scores).any(), "kld_scores contains NaN value(s)!"
-             self.kld_pq_sum += torch.sum(kld_scores)
-             kld_qp_scores = kl_divergence(targets_probs, preds_probs)
-             self.kld_qp_sum += torch.sum(kld_qp_scores)
-             self.weight += torch.tensor(kld_scores.size(0))
-
-     def compute(self) -> dict:
-         """Computes KL-Divergence across all evaluated pred/target pairs."""
-         weight: float = float(self.weight.item())  # type: ignore
-         assert weight > 0, "Unable to compute with total number of comparisons <= 0"
-         logger.info(f"Computing KL divergence on a total of {weight} samples")
-         kld_pq = self.kld_pq_sum.item() / weight  # type: ignore
-         kld_qp = self.kld_qp_sum.item() / weight  # type: ignore
-         kld_both = kld_pq + kld_qp
-         return {'kld': kld_pq, 'kld_pq': kld_pq, 'kld_qp': kld_qp, 'kld_both': kld_both}
-
-
- class PasstKLDivergenceMetric(KLDivergenceMetric):
-     """KL-Divergence metric based on pre-trained PASST classifier on AudioSet.
-
-     From: PaSST: Efficient Training of Audio Transformers with Patchout
-     Paper: https://arxiv.org/abs/2110.05069
-     Implementation: https://github.com/kkoutini/PaSST
-
-     Follow instructions from the github repo:
-     ```
-     pip install 'git+https://github.com/kkoutini/[email protected]#egg=hear21passt'
-     ```
-
-     Args:
-         pretrained_length (float, optional): Audio duration used for the pretrained model.
-     """
-     def __init__(self, pretrained_length: tp.Optional[float] = None):
-         super().__init__()
-         self._initialize_model(pretrained_length)
-
-     def _initialize_model(self, pretrained_length: tp.Optional[float] = None):
-         """Initialize underlying PaSST audio classifier."""
-         model, sr, max_frames, min_frames = self._load_base_model(pretrained_length)
-         self.min_input_frames = min_frames
-         self.max_input_frames = max_frames
-         self.model_sample_rate = sr
-         self.model = model
-         self.model.eval()
-         self.model.to(self.device)
-
-     def _load_base_model(self, pretrained_length: tp.Optional[float]):
-         """Load pretrained model from PaSST."""
-         try:
-             if pretrained_length == 30:
-                 from hear21passt.base30sec import get_basic_model  # type: ignore
-                 max_duration = 30
-             elif pretrained_length == 20:
-                 from hear21passt.base20sec import get_basic_model  # type: ignore
-                 max_duration = 20
-             else:
-                 from hear21passt.base import get_basic_model  # type: ignore
-                 # Original PASST was trained on AudioSet with 10s-long audio samples
-                 max_duration = 10
-             min_duration = 0.15
-             min_duration = 0.15
-         except ModuleNotFoundError:
-             raise ModuleNotFoundError(
-                 "Please install hear21passt to compute KL divergence: ",
-                 "pip install 'git+https://github.com/kkoutini/[email protected]#egg=hear21passt'"
-             )
-         model_sample_rate = 32_000
-         max_input_frames = int(max_duration * model_sample_rate)
-         min_input_frames = int(min_duration * model_sample_rate)
-         with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f):
-             model = get_basic_model(mode='logits')
-         return model, model_sample_rate, max_input_frames, min_input_frames
-
-     def _process_audio(self, wav: torch.Tensor, sample_rate: int, wav_len: int) -> tp.Optional[torch.Tensor]:
-         wav = wav.unsqueeze(0)
-         wav = wav[..., :wav_len]
-         wav = convert_audio(wav, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1)
-         wav = wav.squeeze(0)
-         # create chunks of audio to match the classifier processing length
-         segments = torch.split(wav, self.max_input_frames, dim=-1)
-         valid_segments = []
-         for s in segments:
-             if s.size(-1) > self.min_input_frames:
-                 s = torch.nn.functional.pad(s, (0, self.max_input_frames - s.shape[-1]))
-                 valid_segments.append(s)
-         if len(valid_segments) > 0:
-             return torch.stack(valid_segments, dim=0)
-         else:
-             return None
-
-     def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor,
-                                 sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]:
-         """Get model output given provided input tensor.
-
-         Args:
-             x (torch.Tensor): Input audio tensor of shape [B, C, T].
-             sizes (torch.Tensor): Actual audio sample length, of shape [B].
-             sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
-         Returns:
-             probs (torch.Tensor, optional): Probabilities over labels, of shape [B, num_classes].
-         """
-         all_probs: tp.List[torch.Tensor] = []
-         for i, wav in enumerate(x):
-             sample_rate = int(sample_rates[i].item())
-             wav_len = int(sizes[i].item())
-             wav = self._process_audio(wav, sample_rate, wav_len)
-             if wav is not None:
-                 assert wav.dim() == 3, f"Unexpected number of dims for preprocessed wav: {wav.shape}"
-                 wav = wav.mean(dim=1)
-                 # PaSST is printing a lot of infos that we are not interested in
-                 with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f):
-                     with torch.no_grad(), _patch_passt_stft():
-                         logits = self.model(wav.to(self.device))
-                         probs = torch.softmax(logits, dim=-1)
-                         probs = probs.mean(dim=0)
-                         all_probs.append(probs)
-         if len(all_probs) > 0:
-             return torch.stack(all_probs, dim=0)
-         else:
-             return None
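A minimal, self-contained sketch of the symmetric KL computation the deleted metric reports, using plain torch on fake class probabilities; the helper name, the `eps` value, and the 527-class AudioSet label count are illustrative assumptions, not the deleted audiocraft API:

```python
import torch
import torch.nn.functional as F


def kl_divergence(p: torch.Tensor, q: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Per-sample KL(p || q) over label distributions of shape [B, num_classes].
    return (p * (torch.log(p + eps) - torch.log(q + eps))).sum(dim=-1)


# Fake classifier outputs turned into probabilities (527 ~ AudioSet label count).
preds = F.softmax(torch.randn(4, 527), dim=-1)
targets = F.softmax(torch.randn(4, 527), dim=-1)

kld_pq = kl_divergence(preds, targets).mean()  # analogous to 'kld_pq' above
kld_qp = kl_divergence(targets, preds).mean()  # analogous to 'kld_qp' above
print({"kld_pq": kld_pq.item(), "kld_qp": kld_qp.item(), "kld_both": (kld_pq + kld_qp).item()})
```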
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py DELETED
@@ -1,138 +0,0 @@
1
- _base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
2
-
3
- # ========================modified parameters======================
4
- img_scale = (1280, 1280) # width, height
5
- num_classes = 80 # Number of classes for classification
6
- # Config of batch shapes. Only on val.
7
- # It means not used if batch_shapes_cfg is None.
8
- batch_shapes_cfg = dict(
9
- img_size=img_scale[0],
10
- # The image scale of padding should be divided by pad_size_divisor
11
- size_divisor=64)
12
- # Basic size of multi-scale prior box
13
- anchors = [
14
- [(19, 27), (44, 40), (38, 94)], # P3/8
15
- [(96, 68), (86, 152), (180, 137)], # P4/16
16
- [(140, 301), (303, 264), (238, 542)], # P5/32
17
- [(436, 615), (739, 380), (925, 792)] # P6/64
18
- ]
19
- # Strides of multi-scale prior box
20
- strides = [8, 16, 32, 64]
21
- num_det_layers = 4 # The number of model output scales
22
- loss_cls_weight = 0.5
23
- loss_bbox_weight = 0.05
24
- loss_obj_weight = 1.0
25
- # The obj loss weights of the three output layers
26
- obj_level_weights = [4.0, 1.0, 0.25, 0.06]
27
- affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio
28
-
29
- tta_img_scales = [(1280, 1280), (1024, 1024), (1536, 1536)]
30
- # =======================Unmodified in most cases==================
31
- model = dict(
32
- backbone=dict(arch='P6', out_indices=(2, 3, 4, 5)),
33
- neck=dict(
34
- in_channels=[256, 512, 768, 1024], out_channels=[256, 512, 768, 1024]),
35
- bbox_head=dict(
36
- head_module=dict(
37
- in_channels=[256, 512, 768, 1024], featmap_strides=strides),
38
- prior_generator=dict(base_sizes=anchors, strides=strides),
39
- # scaled based on number of detection layers
40
- loss_cls=dict(loss_weight=loss_cls_weight *
41
- (num_classes / 80 * 3 / num_det_layers)),
42
- loss_bbox=dict(loss_weight=loss_bbox_weight * (3 / num_det_layers)),
43
- loss_obj=dict(loss_weight=loss_obj_weight *
44
- ((img_scale[0] / 640)**2 * 3 / num_det_layers)),
45
- obj_level_weights=obj_level_weights))
46
-
47
- pre_transform = _base_.pre_transform
48
- albu_train_transforms = _base_.albu_train_transforms
49
-
50
- train_pipeline = [
51
- *pre_transform,
52
- dict(
53
- type='Mosaic',
54
- img_scale=img_scale,
55
- pad_val=114.0,
56
- pre_transform=pre_transform),
57
- dict(
58
- type='YOLOv5RandomAffine',
59
- max_rotate_degree=0.0,
60
- max_shear_degree=0.0,
61
- scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
62
- # img_scale is (width, height)
63
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
64
- border_val=(114, 114, 114)),
65
- dict(
66
- type='mmdet.Albu',
67
- transforms=albu_train_transforms,
68
- bbox_params=dict(
69
- type='BboxParams',
70
- format='pascal_voc',
71
- label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
72
- keymap={
73
- 'img': 'image',
74
- 'gt_bboxes': 'bboxes'
75
- }),
76
- dict(type='YOLOv5HSVRandomAug'),
77
- dict(type='mmdet.RandomFlip', prob=0.5),
78
- dict(
79
- type='mmdet.PackDetInputs',
80
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
81
- 'flip_direction'))
82
- ]
83
-
84
- train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
85
-
86
- test_pipeline = [
87
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
88
- dict(type='YOLOv5KeepRatioResize', scale=img_scale),
89
- dict(
90
- type='LetterResize',
91
- scale=img_scale,
92
- allow_scale_up=False,
93
- pad_val=dict(img=114)),
94
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
95
- dict(
96
- type='mmdet.PackDetInputs',
97
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
98
- 'scale_factor', 'pad_param'))
99
- ]
100
-
101
- val_dataloader = dict(
102
- dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg))
103
-
104
- test_dataloader = val_dataloader
105
-
106
- # Config for Test Time Augmentation. (TTA)
107
- _multiscale_resize_transforms = [
108
- dict(
109
- type='Compose',
110
- transforms=[
111
- dict(type='YOLOv5KeepRatioResize', scale=s),
112
- dict(
113
- type='LetterResize',
114
- scale=s,
115
- allow_scale_up=False,
116
- pad_val=dict(img=114))
117
- ]) for s in tta_img_scales
118
- ]
119
-
120
- tta_pipeline = [
121
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
122
- dict(
123
- type='TestTimeAug',
124
- transforms=[
125
- _multiscale_resize_transforms,
126
- [
127
- dict(type='mmdet.RandomFlip', prob=1.),
128
- dict(type='mmdet.RandomFlip', prob=0.)
129
- ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)],
130
- [
131
- dict(
132
- type='mmdet.PackDetInputs',
133
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
134
- 'scale_factor', 'pad_param', 'flip',
135
- 'flip_direction'))
136
- ]
137
- ])
138
- ]
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/util/learning_rates.py DELETED
@@ -1,70 +0,0 @@
- import numpy as np
- import math
-
-
- class LearningRateDecay:
-     def __init__(self, lr=0.002, warmup_steps=4000.0) -> None:
-         self.lr = lr
-         self.warmup_steps = warmup_steps
-
-     def __call__(self, global_step) -> float:
-         step = global_step + 1.0
-         lr = (
-             self.lr
-             * self.warmup_steps ** 0.5
-             * np.minimum(step * self.warmup_steps ** -1.5, step ** -0.5)
-         )
-
-         return lr
-
- class SquareRootScheduler:
-     def __init__(self, lr=0.1):
-         self.lr = lr
-
-     def __call__(self, global_step):
-         global_step = global_step // 1000
-         return self.lr * pow(global_step + 1.0, -0.5)
-
-
- class CosineScheduler:
-     def __init__(
-         self, max_update, base_lr=0.02, final_lr=0, warmup_steps=0, warmup_begin_lr=0
-     ):
-         self.base_lr_orig = base_lr
-         self.max_update = max_update
-         self.final_lr = final_lr
-         self.warmup_steps = warmup_steps
-         self.warmup_begin_lr = warmup_begin_lr
-         self.max_steps = self.max_update - self.warmup_steps
-
-     def get_warmup_lr(self, global_step):
-         increase = (
-             (self.base_lr_orig - self.warmup_begin_lr)
-             * float(global_step)
-             / float(self.warmup_steps)
-         )
-         return self.warmup_begin_lr + increase
-
-     def __call__(self, global_step):
-         if global_step < self.warmup_steps:
-             return self.get_warmup_lr(global_step)
-         if global_step <= self.max_update:
-             self.base_lr = (
-                 self.final_lr
-                 + (self.base_lr_orig - self.final_lr)
-                 * (
-                     1
-                     + math.cos(
-                         math.pi * (global_step - self.warmup_steps) / self.max_steps
-                     )
-                 )
-                 / 2
-             )
-         return self.base_lr
-
- def adjust_learning_rate(optimizer, global_step):
-     lr = LearningRateDecay()(global_step=global_step)
-     for param_group in optimizer.param_groups:
-         param_group["lr"] = lr
-     return lr
-
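A small stand-alone check of the Noam-style warm-up formula in `LearningRateDecay.__call__` above; the expression is restated inline so the snippet runs on its own, and the probe steps are arbitrary:

```python
import numpy as np

lr0, warmup_steps = 0.002, 4000.0  # defaults from LearningRateDecay above


def noam_lr(global_step: int) -> float:
    # Same expression as LearningRateDecay.__call__.
    step = global_step + 1.0
    return lr0 * warmup_steps ** 0.5 * np.minimum(step * warmup_steps ** -1.5, step ** -0.5)


# lr ramps up roughly linearly until ~warmup_steps, then decays as step ** -0.5.
for step in (0, 1000, 4000, 16000, 64000):
    print(step, float(noam_lr(step)))
```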
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/base.py DELETED
@@ -1,30 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, List
-
- from pydantic import BaseModel
-
- from agentverse.message import Message
-
- from . import selector_registry as SelectorRegistry
- from abc import abstractmethod
-
- if TYPE_CHECKING:
-     from agentverse.environments import BaseEnvironment
-
-
- @SelectorRegistry.register("base")
- class BaseSelector(BaseModel):
-     """
-     Base class for all selecters
-     """
-
-     @abstractmethod
-     def select_message(
-         self, environment: BaseEnvironment, messages: List[Message]
-     ) -> List[Message]:
-         """Selects a set of valid messages from all messages"""
-         pass
-
-     def reset(self) -> None:
-         pass
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogresscanvas/CircularProgressCanvas.js DELETED
@@ -1,2 +0,0 @@
- import CircularProgressCanvas from '../../../plugins/circularprogresscanvas.js';
- export default CircularProgressCanvas;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Container.d.ts DELETED
@@ -1,2 +0,0 @@
- import Container from '../../../plugins/containerlite';
- export default Container;
spaces/AkitoP/umamusume_bert_vits2/attentions.py DELETED
@@ -1,464 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- import commons
7
- import logging
8
-
9
- logger = logging.getLogger(__name__)
10
-
11
-
12
- class LayerNorm(nn.Module):
13
- def __init__(self, channels, eps=1e-5):
14
- super().__init__()
15
- self.channels = channels
16
- self.eps = eps
17
-
18
- self.gamma = nn.Parameter(torch.ones(channels))
19
- self.beta = nn.Parameter(torch.zeros(channels))
20
-
21
- def forward(self, x):
22
- x = x.transpose(1, -1)
23
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
24
- return x.transpose(1, -1)
25
-
26
-
27
- @torch.jit.script
28
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
29
- n_channels_int = n_channels[0]
30
- in_act = input_a + input_b
31
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
32
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
33
- acts = t_act * s_act
34
- return acts
35
-
36
-
37
- class Encoder(nn.Module):
38
- def __init__(
39
- self,
40
- hidden_channels,
41
- filter_channels,
42
- n_heads,
43
- n_layers,
44
- kernel_size=1,
45
- p_dropout=0.0,
46
- window_size=4,
47
- isflow=True,
48
- **kwargs
49
- ):
50
- super().__init__()
51
- self.hidden_channels = hidden_channels
52
- self.filter_channels = filter_channels
53
- self.n_heads = n_heads
54
- self.n_layers = n_layers
55
- self.kernel_size = kernel_size
56
- self.p_dropout = p_dropout
57
- self.window_size = window_size
58
- # if isflow:
59
- # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
60
- # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
61
- # self.cond_layer = weight_norm(cond_layer, name='weight')
62
- # self.gin_channels = 256
63
- self.cond_layer_idx = self.n_layers
64
- if "gin_channels" in kwargs:
65
- self.gin_channels = kwargs["gin_channels"]
66
- if self.gin_channels != 0:
67
- self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
68
- # vits2 says 3rd block, so idx is 2 by default
69
- self.cond_layer_idx = (
70
- kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
71
- )
72
- logging.debug(self.gin_channels, self.cond_layer_idx)
73
- assert (
74
- self.cond_layer_idx < self.n_layers
75
- ), "cond_layer_idx should be less than n_layers"
76
- self.drop = nn.Dropout(p_dropout)
77
- self.attn_layers = nn.ModuleList()
78
- self.norm_layers_1 = nn.ModuleList()
79
- self.ffn_layers = nn.ModuleList()
80
- self.norm_layers_2 = nn.ModuleList()
81
- for i in range(self.n_layers):
82
- self.attn_layers.append(
83
- MultiHeadAttention(
84
- hidden_channels,
85
- hidden_channels,
86
- n_heads,
87
- p_dropout=p_dropout,
88
- window_size=window_size,
89
- )
90
- )
91
- self.norm_layers_1.append(LayerNorm(hidden_channels))
92
- self.ffn_layers.append(
93
- FFN(
94
- hidden_channels,
95
- hidden_channels,
96
- filter_channels,
97
- kernel_size,
98
- p_dropout=p_dropout,
99
- )
100
- )
101
- self.norm_layers_2.append(LayerNorm(hidden_channels))
102
-
103
- def forward(self, x, x_mask, g=None):
104
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
105
- x = x * x_mask
106
- for i in range(self.n_layers):
107
- if i == self.cond_layer_idx and g is not None:
108
- g = self.spk_emb_linear(g.transpose(1, 2))
109
- g = g.transpose(1, 2)
110
- x = x + g
111
- x = x * x_mask
112
- y = self.attn_layers[i](x, x, attn_mask)
113
- y = self.drop(y)
114
- x = self.norm_layers_1[i](x + y)
115
-
116
- y = self.ffn_layers[i](x, x_mask)
117
- y = self.drop(y)
118
- x = self.norm_layers_2[i](x + y)
119
- x = x * x_mask
120
- return x
121
-
122
-
123
- class Decoder(nn.Module):
124
- def __init__(
125
- self,
126
- hidden_channels,
127
- filter_channels,
128
- n_heads,
129
- n_layers,
130
- kernel_size=1,
131
- p_dropout=0.0,
132
- proximal_bias=False,
133
- proximal_init=True,
134
- **kwargs
135
- ):
136
- super().__init__()
137
- self.hidden_channels = hidden_channels
138
- self.filter_channels = filter_channels
139
- self.n_heads = n_heads
140
- self.n_layers = n_layers
141
- self.kernel_size = kernel_size
142
- self.p_dropout = p_dropout
143
- self.proximal_bias = proximal_bias
144
- self.proximal_init = proximal_init
145
-
146
- self.drop = nn.Dropout(p_dropout)
147
- self.self_attn_layers = nn.ModuleList()
148
- self.norm_layers_0 = nn.ModuleList()
149
- self.encdec_attn_layers = nn.ModuleList()
150
- self.norm_layers_1 = nn.ModuleList()
151
- self.ffn_layers = nn.ModuleList()
152
- self.norm_layers_2 = nn.ModuleList()
153
- for i in range(self.n_layers):
154
- self.self_attn_layers.append(
155
- MultiHeadAttention(
156
- hidden_channels,
157
- hidden_channels,
158
- n_heads,
159
- p_dropout=p_dropout,
160
- proximal_bias=proximal_bias,
161
- proximal_init=proximal_init,
162
- )
163
- )
164
- self.norm_layers_0.append(LayerNorm(hidden_channels))
165
- self.encdec_attn_layers.append(
166
- MultiHeadAttention(
167
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
168
- )
169
- )
170
- self.norm_layers_1.append(LayerNorm(hidden_channels))
171
- self.ffn_layers.append(
172
- FFN(
173
- hidden_channels,
174
- hidden_channels,
175
- filter_channels,
176
- kernel_size,
177
- p_dropout=p_dropout,
178
- causal=True,
179
- )
180
- )
181
- self.norm_layers_2.append(LayerNorm(hidden_channels))
182
-
183
- def forward(self, x, x_mask, h, h_mask):
184
- """
185
- x: decoder input
186
- h: encoder output
187
- """
188
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
189
- device=x.device, dtype=x.dtype
190
- )
191
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
192
- x = x * x_mask
193
- for i in range(self.n_layers):
194
- y = self.self_attn_layers[i](x, x, self_attn_mask)
195
- y = self.drop(y)
196
- x = self.norm_layers_0[i](x + y)
197
-
198
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
199
- y = self.drop(y)
200
- x = self.norm_layers_1[i](x + y)
201
-
202
- y = self.ffn_layers[i](x, x_mask)
203
- y = self.drop(y)
204
- x = self.norm_layers_2[i](x + y)
205
- x = x * x_mask
206
- return x
207
-
208
-
209
- class MultiHeadAttention(nn.Module):
210
- def __init__(
211
- self,
212
- channels,
213
- out_channels,
214
- n_heads,
215
- p_dropout=0.0,
216
- window_size=None,
217
- heads_share=True,
218
- block_length=None,
219
- proximal_bias=False,
220
- proximal_init=False,
221
- ):
222
- super().__init__()
223
- assert channels % n_heads == 0
224
-
225
- self.channels = channels
226
- self.out_channels = out_channels
227
- self.n_heads = n_heads
228
- self.p_dropout = p_dropout
229
- self.window_size = window_size
230
- self.heads_share = heads_share
231
- self.block_length = block_length
232
- self.proximal_bias = proximal_bias
233
- self.proximal_init = proximal_init
234
- self.attn = None
235
-
236
- self.k_channels = channels // n_heads
237
- self.conv_q = nn.Conv1d(channels, channels, 1)
238
- self.conv_k = nn.Conv1d(channels, channels, 1)
239
- self.conv_v = nn.Conv1d(channels, channels, 1)
240
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
241
- self.drop = nn.Dropout(p_dropout)
242
-
243
- if window_size is not None:
244
- n_heads_rel = 1 if heads_share else n_heads
245
- rel_stddev = self.k_channels**-0.5
246
- self.emb_rel_k = nn.Parameter(
247
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
248
- * rel_stddev
249
- )
250
- self.emb_rel_v = nn.Parameter(
251
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
252
- * rel_stddev
253
- )
254
-
255
- nn.init.xavier_uniform_(self.conv_q.weight)
256
- nn.init.xavier_uniform_(self.conv_k.weight)
257
- nn.init.xavier_uniform_(self.conv_v.weight)
258
- if proximal_init:
259
- with torch.no_grad():
260
- self.conv_k.weight.copy_(self.conv_q.weight)
261
- self.conv_k.bias.copy_(self.conv_q.bias)
262
-
263
- def forward(self, x, c, attn_mask=None):
264
- q = self.conv_q(x)
265
- k = self.conv_k(c)
266
- v = self.conv_v(c)
267
-
268
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
269
-
270
- x = self.conv_o(x)
271
- return x
272
-
273
- def attention(self, query, key, value, mask=None):
274
- # reshape [b, d, t] -> [b, n_h, t, d_k]
275
- b, d, t_s, t_t = (*key.size(), query.size(2))
276
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
277
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
278
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
279
-
280
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
281
- if self.window_size is not None:
282
- assert (
283
- t_s == t_t
284
- ), "Relative attention is only available for self-attention."
285
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
286
- rel_logits = self._matmul_with_relative_keys(
287
- query / math.sqrt(self.k_channels), key_relative_embeddings
288
- )
289
- scores_local = self._relative_position_to_absolute_position(rel_logits)
290
- scores = scores + scores_local
291
- if self.proximal_bias:
292
- assert t_s == t_t, "Proximal bias is only available for self-attention."
293
- scores = scores + self._attention_bias_proximal(t_s).to(
294
- device=scores.device, dtype=scores.dtype
295
- )
296
- if mask is not None:
297
- scores = scores.masked_fill(mask == 0, -1e4)
298
- if self.block_length is not None:
299
- assert (
300
- t_s == t_t
301
- ), "Local attention is only available for self-attention."
302
- block_mask = (
303
- torch.ones_like(scores)
304
- .triu(-self.block_length)
305
- .tril(self.block_length)
306
- )
307
- scores = scores.masked_fill(block_mask == 0, -1e4)
308
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
309
- p_attn = self.drop(p_attn)
310
- output = torch.matmul(p_attn, value)
311
- if self.window_size is not None:
312
- relative_weights = self._absolute_position_to_relative_position(p_attn)
313
- value_relative_embeddings = self._get_relative_embeddings(
314
- self.emb_rel_v, t_s
315
- )
316
- output = output + self._matmul_with_relative_values(
317
- relative_weights, value_relative_embeddings
318
- )
319
- output = (
320
- output.transpose(2, 3).contiguous().view(b, d, t_t)
321
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
322
- return output, p_attn
323
-
324
- def _matmul_with_relative_values(self, x, y):
325
- """
326
- x: [b, h, l, m]
327
- y: [h or 1, m, d]
328
- ret: [b, h, l, d]
329
- """
330
- ret = torch.matmul(x, y.unsqueeze(0))
331
- return ret
332
-
333
- def _matmul_with_relative_keys(self, x, y):
334
- """
335
- x: [b, h, l, d]
336
- y: [h or 1, m, d]
337
- ret: [b, h, l, m]
338
- """
339
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
340
- return ret
341
-
342
- def _get_relative_embeddings(self, relative_embeddings, length):
343
- 2 * self.window_size + 1
344
- # Pad first before slice to avoid using cond ops.
345
- pad_length = max(length - (self.window_size + 1), 0)
346
- slice_start_position = max((self.window_size + 1) - length, 0)
347
- slice_end_position = slice_start_position + 2 * length - 1
348
- if pad_length > 0:
349
- padded_relative_embeddings = F.pad(
350
- relative_embeddings,
351
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
352
- )
353
- else:
354
- padded_relative_embeddings = relative_embeddings
355
- used_relative_embeddings = padded_relative_embeddings[
356
- :, slice_start_position:slice_end_position
357
- ]
358
- return used_relative_embeddings
359
-
360
- def _relative_position_to_absolute_position(self, x):
361
- """
362
- x: [b, h, l, 2*l-1]
363
- ret: [b, h, l, l]
364
- """
365
- batch, heads, length, _ = x.size()
366
- # Concat columns of pad to shift from relative to absolute indexing.
367
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
368
-
369
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
370
- x_flat = x.view([batch, heads, length * 2 * length])
371
- x_flat = F.pad(
372
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
373
- )
374
-
375
- # Reshape and slice out the padded elements.
376
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
377
- :, :, :length, length - 1 :
378
- ]
379
- return x_final
380
-
381
- def _absolute_position_to_relative_position(self, x):
382
- """
383
- x: [b, h, l, l]
384
- ret: [b, h, l, 2*l-1]
385
- """
386
- batch, heads, length, _ = x.size()
387
- # pad along column
388
- x = F.pad(
389
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
390
- )
391
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
392
- # add 0's in the beginning that will skew the elements after reshape
393
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
394
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
395
- return x_final
396
-
397
- def _attention_bias_proximal(self, length):
398
- """Bias for self-attention to encourage attention to close positions.
399
- Args:
400
- length: an integer scalar.
401
- Returns:
402
- a Tensor with shape [1, 1, length, length]
403
- """
404
- r = torch.arange(length, dtype=torch.float32)
405
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
406
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
407
-
408
-
409
- class FFN(nn.Module):
410
- def __init__(
411
- self,
412
- in_channels,
413
- out_channels,
414
- filter_channels,
415
- kernel_size,
416
- p_dropout=0.0,
417
- activation=None,
418
- causal=False,
419
- ):
420
- super().__init__()
421
- self.in_channels = in_channels
422
- self.out_channels = out_channels
423
- self.filter_channels = filter_channels
424
- self.kernel_size = kernel_size
425
- self.p_dropout = p_dropout
426
- self.activation = activation
427
- self.causal = causal
428
-
429
- if causal:
430
- self.padding = self._causal_padding
431
- else:
432
- self.padding = self._same_padding
433
-
434
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
435
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
436
- self.drop = nn.Dropout(p_dropout)
437
-
438
- def forward(self, x, x_mask):
439
- x = self.conv_1(self.padding(x * x_mask))
440
- if self.activation == "gelu":
441
- x = x * torch.sigmoid(1.702 * x)
442
- else:
443
- x = torch.relu(x)
444
- x = self.drop(x)
445
- x = self.conv_2(self.padding(x * x_mask))
446
- return x * x_mask
447
-
448
- def _causal_padding(self, x):
449
- if self.kernel_size == 1:
450
- return x
451
- pad_l = self.kernel_size - 1
452
- pad_r = 0
453
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
454
- x = F.pad(x, commons.convert_pad_shape(padding))
455
- return x
456
-
457
- def _same_padding(self, x):
458
- if self.kernel_size == 1:
459
- return x
460
- pad_l = (self.kernel_size - 1) // 2
461
- pad_r = self.kernel_size // 2
462
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
463
- x = F.pad(x, commons.convert_pad_shape(padding))
464
- return x
spaces/AkitoP/umamusume_bert_vits2/losses.py DELETED
@@ -1,58 +0,0 @@
- import torch
-
-
- def feature_loss(fmap_r, fmap_g):
-     loss = 0
-     for dr, dg in zip(fmap_r, fmap_g):
-         for rl, gl in zip(dr, dg):
-             rl = rl.float().detach()
-             gl = gl.float()
-             loss += torch.mean(torch.abs(rl - gl))
-
-     return loss * 2
-
-
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-     loss = 0
-     r_losses = []
-     g_losses = []
-     for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-         dr = dr.float()
-         dg = dg.float()
-         r_loss = torch.mean((1 - dr) ** 2)
-         g_loss = torch.mean(dg**2)
-         loss += r_loss + g_loss
-         r_losses.append(r_loss.item())
-         g_losses.append(g_loss.item())
-
-     return loss, r_losses, g_losses
-
-
- def generator_loss(disc_outputs):
-     loss = 0
-     gen_losses = []
-     for dg in disc_outputs:
-         dg = dg.float()
-         l = torch.mean((1 - dg) ** 2)
-         gen_losses.append(l)
-         loss += l
-
-     return loss, gen_losses
-
-
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
-     """
-     z_p, logs_q: [b, h, t_t]
-     m_p, logs_p: [b, h, t_t]
-     """
-     z_p = z_p.float()
-     logs_q = logs_q.float()
-     m_p = m_p.float()
-     logs_p = logs_p.float()
-     z_mask = z_mask.float()
-
-     kl = logs_p - logs_q - 0.5
-     kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
-     kl = torch.sum(kl * z_mask)
-     l = kl / torch.sum(z_mask)
-     return l
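A quick stand-alone shape check of the `kl_loss` term above on random tensors; the formula is restated inline so it runs without the deleted module, and the batch/channel/frame sizes are arbitrary:

```python
import torch

b, h, t = 2, 192, 50                       # arbitrary [batch, channels, frames]
z_p, m_p = torch.randn(b, h, t), torch.randn(b, h, t)
logs_q, logs_p = torch.randn(b, h, t), torch.randn(b, h, t)
z_mask = torch.ones(b, 1, t)               # broadcasts over the channel dim

# Same expression as kl_loss above.
kl = logs_p - logs_q - 0.5
kl = kl + 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
loss = torch.sum(kl * z_mask) / torch.sum(z_mask)
print(loss.shape, float(loss))             # scalar
```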
spaces/Akmyradov/TurkmenTTSweSTT/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: MMS
- emoji: ⚡
- colorFrom: pink
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.32.0
- app_file: app.py
- pinned: false
- license: cc-by-nc-4.0
- duplicated_from: facebook/MMS
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlexZou/Deploy_Restoration/net/SGFMT.py DELETED
@@ -1,126 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # @Author : Lintao Peng
3
- # @File : SGFMT.py
4
- # coding=utf-8
5
- # Design based on the Vit
6
-
7
- import torch.nn as nn
8
- from net.IntmdSequential import IntermediateSequential
9
-
10
-
11
- #实现了自注意力机制,相当于unet的bottleneck层
12
- class SelfAttention(nn.Module):
13
- def __init__(
14
- self, dim, heads=8, qkv_bias=False, qk_scale=None, dropout_rate=0.0
15
- ):
16
- super().__init__()
17
- self.num_heads = heads
18
- head_dim = dim // heads
19
- self.scale = qk_scale or head_dim ** -0.5
20
-
21
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
22
- self.attn_drop = nn.Dropout(dropout_rate)
23
- self.proj = nn.Linear(dim, dim)
24
- self.proj_drop = nn.Dropout(dropout_rate)
25
-
26
- def forward(self, x):
27
- B, N, C = x.shape
28
- qkv = (
29
- self.qkv(x)
30
- .reshape(B, N, 3, self.num_heads, C // self.num_heads)
31
- .permute(2, 0, 3, 1, 4)
32
- )
33
- q, k, v = (
34
- qkv[0],
35
- qkv[1],
36
- qkv[2],
37
- ) # make torchscript happy (cannot use tensor as tuple)
38
-
39
- attn = (q @ k.transpose(-2, -1)) * self.scale
40
- attn = attn.softmax(dim=-1)
41
- attn = self.attn_drop(attn)
42
-
43
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
44
- x = self.proj(x)
45
- x = self.proj_drop(x)
46
- return x
47
-
48
-
49
- class Residual(nn.Module):
50
- def __init__(self, fn):
51
- super().__init__()
52
- self.fn = fn
53
-
54
- def forward(self, x):
55
- return self.fn(x) + x
56
-
57
-
58
- class PreNorm(nn.Module):
59
- def __init__(self, dim, fn):
60
- super().__init__()
61
- self.norm = nn.LayerNorm(dim)
62
- self.fn = fn
63
-
64
- def forward(self, x):
65
- return self.fn(self.norm(x))
66
-
67
-
68
- class PreNormDrop(nn.Module):
69
- def __init__(self, dim, dropout_rate, fn):
70
- super().__init__()
71
- self.norm = nn.LayerNorm(dim)
72
- self.dropout = nn.Dropout(p=dropout_rate)
73
- self.fn = fn
74
-
75
- def forward(self, x):
76
- return self.dropout(self.fn(self.norm(x)))
77
-
78
-
79
- class FeedForward(nn.Module):
80
- def __init__(self, dim, hidden_dim, dropout_rate):
81
- super().__init__()
82
- self.net = nn.Sequential(
83
- nn.Linear(dim, hidden_dim),
84
- nn.GELU(),
85
- nn.Dropout(p=dropout_rate),
86
- nn.Linear(hidden_dim, dim),
87
- nn.Dropout(p=dropout_rate),
88
- )
89
-
90
- def forward(self, x):
91
- return self.net(x)
92
-
93
-
94
- class TransformerModel(nn.Module):
95
- def __init__(
96
- self,
97
- dim, #512
98
- depth, #4
99
- heads, #8
100
- mlp_dim, #4096
101
- dropout_rate=0.1,
102
- attn_dropout_rate=0.1,
103
- ):
104
- super().__init__()
105
- layers = []
106
- for _ in range(depth):
107
- layers.extend(
108
- [
109
- Residual(
110
- PreNormDrop(
111
- dim,
112
- dropout_rate,
113
- SelfAttention(dim, heads=heads, dropout_rate=attn_dropout_rate),
114
- )
115
- ),
116
- Residual(
117
- PreNorm(dim, FeedForward(dim, mlp_dim, dropout_rate))
118
- ),
119
- ]
120
- )
121
- # dim = dim / 2
122
- self.net = IntermediateSequential(*layers)
123
-
124
-
125
- def forward(self, x):
126
- return self.net(x)
spaces/Amon1/ChatGPTForAcadamic/theme.py DELETED
@@ -1,152 +0,0 @@
1
- import gradio as gr
2
-
3
- # gradio可用颜色列表
4
- # gr.themes.utils.colors.slate (石板色)
5
- # gr.themes.utils.colors.gray (灰色)
6
- # gr.themes.utils.colors.zinc (锌色)
7
- # gr.themes.utils.colors.neutral (中性色)
8
- # gr.themes.utils.colors.stone (石头色)
9
- # gr.themes.utils.colors.red (红色)
10
- # gr.themes.utils.colors.orange (橙色)
11
- # gr.themes.utils.colors.amber (琥珀色)
12
- # gr.themes.utils.colors.yellow (黄色)
13
- # gr.themes.utils.colors.lime (酸橙色)
14
- # gr.themes.utils.colors.green (绿色)
15
- # gr.themes.utils.colors.emerald (祖母绿)
16
- # gr.themes.utils.colors.teal (青蓝色)
17
- # gr.themes.utils.colors.cyan (青色)
18
- # gr.themes.utils.colors.sky (天蓝色)
19
- # gr.themes.utils.colors.blue (蓝色)
20
- # gr.themes.utils.colors.indigo (靛蓝色)
21
- # gr.themes.utils.colors.violet (紫罗兰色)
22
- # gr.themes.utils.colors.purple (紫色)
23
- # gr.themes.utils.colors.fuchsia (洋红色)
24
- # gr.themes.utils.colors.pink (粉红色)
25
- # gr.themes.utils.colors.rose (玫瑰色)
26
-
27
- def adjust_theme():
28
- try:
29
- color_er = gr.themes.utils.colors.pink
30
- set_theme = gr.themes.Default(
31
- primary_hue=gr.themes.utils.colors.orange,
32
- neutral_hue=gr.themes.utils.colors.gray,
33
- font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
34
- font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
35
- set_theme.set(
36
- # Colors
37
- input_background_fill_dark="*neutral_800",
38
- # Transition
39
- button_transition="none",
40
- # Shadows
41
- button_shadow="*shadow_drop",
42
- button_shadow_hover="*shadow_drop_lg",
43
- button_shadow_active="*shadow_inset",
44
- input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
45
- input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
46
- input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
47
- checkbox_label_shadow="*shadow_drop",
48
- block_shadow="*shadow_drop",
49
- form_gap_width="1px",
50
- # Button borders
51
- input_border_width="1px",
52
- input_background_fill="white",
53
- # Gradients
54
- stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
55
- stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
56
- error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
57
- error_background_fill_dark="*background_fill_primary",
58
- checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
59
- checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
60
- checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
61
- checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
62
- button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
63
- button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
64
- button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
65
- button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
66
- button_primary_border_color_dark="*primary_500",
67
- button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
68
- button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
69
- button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
70
- button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
71
- button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
72
- button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
73
- button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
74
- button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
75
- button_cancel_border_color=color_er.c200,
76
- button_cancel_border_color_dark=color_er.c600,
77
- button_cancel_text_color=color_er.c600,
78
- button_cancel_text_color_dark="white",
79
- )
80
- except:
81
- set_theme = None; print('gradio版本较旧, 不能自定义字体和颜色')
82
- return set_theme
83
-
84
- advanced_css = """
85
- /* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
86
- .markdown-body table {
87
- margin: 1em 0;
88
- border-collapse: collapse;
89
- empty-cells: show;
90
- }
91
-
92
- /* 设置表格单元格的内边距为5px,边框粗细为1.2px,颜色为--border-color-primary. */
93
- .markdown-body th, .markdown-body td {
94
- border: 1.2px solid var(--border-color-primary);
95
- padding: 5px;
96
- }
97
-
98
- /* 设置表头背景颜色为rgba(175,184,193,0.2),透明度为0.2. */
99
- .markdown-body thead {
100
- background-color: rgba(175,184,193,0.2);
101
- }
102
-
103
- /* 设置表头单元格的内边距为0.5em和0.2em. */
104
- .markdown-body thead th {
105
- padding: .5em .2em;
106
- }
107
-
108
- /* 去掉列表前缀的默认间距,使其与文本线对齐. */
109
- .markdown-body ol, .markdown-body ul {
110
- padding-inline-start: 2em !important;
111
- }
112
-
113
- /* 设定聊天气泡的样式,包括圆角、最大宽度和阴影等. */
114
- [class *= "message"] {
115
- border-radius: var(--radius-xl) !important;
116
- /* padding: var(--spacing-xl) !important; */
117
- /* font-size: var(--text-md) !important; */
118
- /* line-height: var(--line-md) !important; */
119
- /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
120
- /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
121
- }
122
- [data-testid = "bot"] {
123
- max-width: 95%;
124
- /* width: auto !important; */
125
- border-bottom-left-radius: 0 !important;
126
- }
127
- [data-testid = "user"] {
128
- max-width: 100%;
129
- /* width: auto !important; */
130
- border-bottom-right-radius: 0 !important;
131
- }
132
-
133
- /* 行内代码的背景设为淡灰色,设定圆角和间距. */
134
- .markdown-body code {
135
- display: inline;
136
- white-space: break-spaces;
137
- border-radius: 6px;
138
- margin: 0 2px 0 2px;
139
- padding: .2em .4em .1em .4em;
140
- background-color: rgba(175,184,193,0.2);
141
- }
142
- /* 设定代码块的样式,包括背景颜色、内、外边距、圆角。 */
143
- .markdown-body pre code {
144
- display: block;
145
- overflow: auto;
146
- white-space: pre;
147
- background-color: rgba(175,184,193,0.2);
148
- border-radius: 10px;
149
- padding: 1em;
150
- margin: 1em 2em 1em 0.5em;
151
- }
152
- """
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- # empty
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py DELETED
@@ -1,747 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import numpy as np
20
- import PIL
21
- import torch
22
- from packaging import version
23
- from transformers import CLIPImageProcessor, XLMRobertaTokenizer
24
-
25
- from diffusers.utils import is_accelerate_available, is_accelerate_version
26
-
27
- from ...configuration_utils import FrozenDict
28
- from ...image_processor import VaeImageProcessor
29
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
30
- from ...models import AutoencoderKL, UNet2DConditionModel
31
- from ...schedulers import KarrasDiffusionSchedulers
32
- from ...utils import PIL_INTERPOLATION, deprecate, logging, randn_tensor, replace_example_docstring
33
- from ..pipeline_utils import DiffusionPipeline
34
- from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
35
- from . import AltDiffusionPipelineOutput, RobertaSeriesModelWithTransformation
36
-
37
-
38
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
-
40
- EXAMPLE_DOC_STRING = """
41
- Examples:
42
- ```py
43
- >>> import requests
44
- >>> import torch
45
- >>> from PIL import Image
46
- >>> from io import BytesIO
47
-
48
- >>> from diffusers import AltDiffusionImg2ImgPipeline
49
-
50
- >>> device = "cuda"
51
- >>> model_id_or_path = "BAAI/AltDiffusion-m9"
52
- >>> pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
53
- >>> pipe = pipe.to(device)
54
-
55
- >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
56
-
57
- >>> response = requests.get(url)
58
- >>> init_image = Image.open(BytesIO(response.content)).convert("RGB")
59
- >>> init_image = init_image.resize((768, 512))
60
-
61
- >>> # "A fantasy landscape, trending on artstation"
62
- >>> prompt = "幻想风景, artstation"
63
-
64
- >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
65
- >>> images[0].save("幻想风景.png")
66
- ```
67
- """
68
-
69
-
70
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
71
- def preprocess(image):
72
- warnings.warn(
73
- "The preprocess method is deprecated and will be removed in a future version. Please"
74
- " use VaeImageProcessor.preprocess instead",
75
- FutureWarning,
76
- )
77
- if isinstance(image, torch.Tensor):
78
- return image
79
- elif isinstance(image, PIL.Image.Image):
80
- image = [image]
81
-
82
- if isinstance(image[0], PIL.Image.Image):
83
- w, h = image[0].size
84
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
85
-
86
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
87
- image = np.concatenate(image, axis=0)
88
- image = np.array(image).astype(np.float32) / 255.0
89
- image = image.transpose(0, 3, 1, 2)
90
- image = 2.0 * image - 1.0
91
- image = torch.from_numpy(image)
92
- elif isinstance(image[0], torch.Tensor):
93
- image = torch.cat(image, dim=0)
94
- return image
95
-
96
-
97
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker
98
- class AltDiffusionImg2ImgPipeline(
99
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
100
- ):
101
- r"""
102
- Pipeline for text-guided image-to-image generation using Alt Diffusion.
103
-
104
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
105
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
106
-
107
- The pipeline also inherits the following loading methods:
108
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
109
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
110
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
111
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
112
-
113
- Args:
114
- vae ([`AutoencoderKL`]):
115
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
116
- text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]):
117
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
118
- tokenizer ([`~transformers.XLMRobertaTokenizer`]):
119
- A `XLMRobertaTokenizer` to tokenize text.
120
- unet ([`UNet2DConditionModel`]):
121
- A `UNet2DConditionModel` to denoise the encoded image latents.
122
- scheduler ([`SchedulerMixin`]):
123
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
124
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
125
- safety_checker ([`StableDiffusionSafetyChecker`]):
126
- Classification module that estimates whether generated images could be considered offensive or harmful.
127
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
128
- about a model's potential harms.
129
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
130
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
131
- """
132
- _optional_components = ["safety_checker", "feature_extractor"]
133
-
134
- def __init__(
135
- self,
136
- vae: AutoencoderKL,
137
- text_encoder: RobertaSeriesModelWithTransformation,
138
- tokenizer: XLMRobertaTokenizer,
139
- unet: UNet2DConditionModel,
140
- scheduler: KarrasDiffusionSchedulers,
141
- safety_checker: StableDiffusionSafetyChecker,
142
- feature_extractor: CLIPImageProcessor,
143
- requires_safety_checker: bool = True,
144
- ):
145
- super().__init__()
146
-
147
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
148
- deprecation_message = (
149
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
150
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
151
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
152
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
153
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
154
- " file"
155
- )
156
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
157
- new_config = dict(scheduler.config)
158
- new_config["steps_offset"] = 1
159
- scheduler._internal_dict = FrozenDict(new_config)
160
-
161
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
162
- deprecation_message = (
163
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
164
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
165
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
166
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
167
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
168
- )
169
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
170
- new_config = dict(scheduler.config)
171
- new_config["clip_sample"] = False
172
- scheduler._internal_dict = FrozenDict(new_config)
173
-
174
- if safety_checker is None and requires_safety_checker:
175
- logger.warning(
176
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
177
- " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered"
178
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
179
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
180
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
181
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
182
- )
183
-
184
- if safety_checker is not None and feature_extractor is None:
185
- raise ValueError(
186
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
187
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
188
- )
189
-
190
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
191
- version.parse(unet.config._diffusers_version).base_version
192
- ) < version.parse("0.9.0.dev0")
193
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
194
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
195
- deprecation_message = (
196
- "The configuration file of the unet has set the default `sample_size` to smaller than"
197
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
198
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
199
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
200
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
201
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
202
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
203
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
204
- " the `unet/config.json` file"
205
- )
206
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
207
- new_config = dict(unet.config)
208
- new_config["sample_size"] = 64
209
- unet._internal_dict = FrozenDict(new_config)
210
-
211
- self.register_modules(
212
- vae=vae,
213
- text_encoder=text_encoder,
214
- tokenizer=tokenizer,
215
- unet=unet,
216
- scheduler=scheduler,
217
- safety_checker=safety_checker,
218
- feature_extractor=feature_extractor,
219
- )
220
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
221
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
222
- self.register_to_config(requires_safety_checker=requires_safety_checker)
223
-
224
- def enable_model_cpu_offload(self, gpu_id=0):
225
- r"""
226
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
227
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
228
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
229
- iterative execution of the `unet`.
230
- """
231
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
232
- from accelerate import cpu_offload_with_hook
233
- else:
234
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
235
-
236
- device = torch.device(f"cuda:{gpu_id}")
237
-
238
- if self.device.type != "cpu":
239
- self.to("cpu", silence_dtype_warnings=True)
240
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
241
-
242
- hook = None
243
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
244
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
245
-
246
- if self.safety_checker is not None:
247
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
248
-
249
- # We'll offload the last model manually.
250
- self.final_offload_hook = hook
251
-
252
- def _encode_prompt(
253
- self,
254
- prompt,
255
- device,
256
- num_images_per_prompt,
257
- do_classifier_free_guidance,
258
- negative_prompt=None,
259
- prompt_embeds: Optional[torch.FloatTensor] = None,
260
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
261
- lora_scale: Optional[float] = None,
262
- ):
263
- r"""
264
- Encodes the prompt into text encoder hidden states.
265
-
266
- Args:
267
- prompt (`str` or `List[str]`, *optional*):
268
- prompt to be encoded
269
- device: (`torch.device`):
270
- torch device
271
- num_images_per_prompt (`int`):
272
- number of images that should be generated per prompt
273
- do_classifier_free_guidance (`bool`):
274
- whether to use classifier free guidance or not
275
- negative_prompt (`str` or `List[str]`, *optional*):
276
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
277
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
278
- less than `1`).
279
- prompt_embeds (`torch.FloatTensor`, *optional*):
280
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
281
- provided, text embeddings will be generated from `prompt` input argument.
282
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
283
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
284
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
285
- argument.
286
- lora_scale (`float`, *optional*):
287
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
288
- """
289
- # set lora scale so that monkey patched LoRA
290
- # function of text encoder can correctly access it
291
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
292
- self._lora_scale = lora_scale
293
-
294
- if prompt is not None and isinstance(prompt, str):
295
- batch_size = 1
296
- elif prompt is not None and isinstance(prompt, list):
297
- batch_size = len(prompt)
298
- else:
299
- batch_size = prompt_embeds.shape[0]
300
-
301
- if prompt_embeds is None:
302
- # textual inversion: procecss multi-vector tokens if necessary
303
- if isinstance(self, TextualInversionLoaderMixin):
304
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
305
-
306
- text_inputs = self.tokenizer(
307
- prompt,
308
- padding="max_length",
309
- max_length=self.tokenizer.model_max_length,
310
- truncation=True,
311
- return_tensors="pt",
312
- )
313
- text_input_ids = text_inputs.input_ids
314
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
315
-
316
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
317
- text_input_ids, untruncated_ids
318
- ):
319
- removed_text = self.tokenizer.batch_decode(
320
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
321
- )
322
- logger.warning(
323
- "The following part of your input was truncated because CLIP can only handle sequences up to"
324
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
325
- )
326
-
327
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
328
- attention_mask = text_inputs.attention_mask.to(device)
329
- else:
330
- attention_mask = None
331
-
332
- prompt_embeds = self.text_encoder(
333
- text_input_ids.to(device),
334
- attention_mask=attention_mask,
335
- )
336
- prompt_embeds = prompt_embeds[0]
337
-
338
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
339
-
340
- bs_embed, seq_len, _ = prompt_embeds.shape
341
- # duplicate text embeddings for each generation per prompt, using mps friendly method
342
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
343
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
344
-
345
- # get unconditional embeddings for classifier free guidance
346
- if do_classifier_free_guidance and negative_prompt_embeds is None:
347
- uncond_tokens: List[str]
348
- if negative_prompt is None:
349
- uncond_tokens = [""] * batch_size
350
- elif prompt is not None and type(prompt) is not type(negative_prompt):
351
- raise TypeError(
352
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
353
- f" {type(prompt)}."
354
- )
355
- elif isinstance(negative_prompt, str):
356
- uncond_tokens = [negative_prompt]
357
- elif batch_size != len(negative_prompt):
358
- raise ValueError(
359
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
360
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
361
- " the batch size of `prompt`."
362
- )
363
- else:
364
- uncond_tokens = negative_prompt
365
-
366
- # textual inversion: process multi-vector tokens if necessary
367
- if isinstance(self, TextualInversionLoaderMixin):
368
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
369
-
370
- max_length = prompt_embeds.shape[1]
371
- uncond_input = self.tokenizer(
372
- uncond_tokens,
373
- padding="max_length",
374
- max_length=max_length,
375
- truncation=True,
376
- return_tensors="pt",
377
- )
378
-
379
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
380
- attention_mask = uncond_input.attention_mask.to(device)
381
- else:
382
- attention_mask = None
383
-
384
- negative_prompt_embeds = self.text_encoder(
385
- uncond_input.input_ids.to(device),
386
- attention_mask=attention_mask,
387
- )
388
- negative_prompt_embeds = negative_prompt_embeds[0]
389
-
390
- if do_classifier_free_guidance:
391
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
392
- seq_len = negative_prompt_embeds.shape[1]
393
-
394
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
395
-
396
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
397
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
398
-
399
- # For classifier free guidance, we need to do two forward passes.
400
- # Here we concatenate the unconditional and text embeddings into a single batch
401
- # to avoid doing two forward passes
402
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
403
-
404
- return prompt_embeds
405
-
406
- def run_safety_checker(self, image, device, dtype):
407
- if self.safety_checker is None:
408
- has_nsfw_concept = None
409
- else:
410
- if torch.is_tensor(image):
411
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
412
- else:
413
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
414
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
415
- image, has_nsfw_concept = self.safety_checker(
416
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
417
- )
418
- return image, has_nsfw_concept
419
-
420
- def decode_latents(self, latents):
421
- warnings.warn(
422
- (
423
- "The decode_latents method is deprecated and will be removed in a future version. Please"
424
- " use VaeImageProcessor instead"
425
- ),
426
- FutureWarning,
427
- )
428
- latents = 1 / self.vae.config.scaling_factor * latents
429
- image = self.vae.decode(latents, return_dict=False)[0]
430
- image = (image / 2 + 0.5).clamp(0, 1)
431
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
432
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
433
- return image
434
-
435
- def prepare_extra_step_kwargs(self, generator, eta):
436
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
437
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
438
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
439
- # and should be between [0, 1]
440
-
441
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
442
- extra_step_kwargs = {}
443
- if accepts_eta:
444
- extra_step_kwargs["eta"] = eta
445
-
446
- # check if the scheduler accepts generator
447
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
448
- if accepts_generator:
449
- extra_step_kwargs["generator"] = generator
450
- return extra_step_kwargs
451
-
452
- def check_inputs(
453
- self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
454
- ):
455
- if strength < 0 or strength > 1:
456
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
457
-
458
- if (callback_steps is None) or (
459
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
460
- ):
461
- raise ValueError(
462
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
463
- f" {type(callback_steps)}."
464
- )
465
-
466
- if prompt is not None and prompt_embeds is not None:
467
- raise ValueError(
468
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
469
- " only forward one of the two."
470
- )
471
- elif prompt is None and prompt_embeds is None:
472
- raise ValueError(
473
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
474
- )
475
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
476
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
477
-
478
- if negative_prompt is not None and negative_prompt_embeds is not None:
479
- raise ValueError(
480
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
481
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
482
- )
483
-
484
- if prompt_embeds is not None and negative_prompt_embeds is not None:
485
- if prompt_embeds.shape != negative_prompt_embeds.shape:
486
- raise ValueError(
487
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
488
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
489
- f" {negative_prompt_embeds.shape}."
490
- )
491
-
492
- def get_timesteps(self, num_inference_steps, strength, device):
493
- # get the original timestep using init_timestep
494
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
495
-
496
- t_start = max(num_inference_steps - init_timestep, 0)
497
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
498
-
499
- return timesteps, num_inference_steps - t_start
500
-
501
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
502
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
503
- raise ValueError(
504
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
505
- )
506
-
507
- image = image.to(device=device, dtype=dtype)
508
-
509
- batch_size = batch_size * num_images_per_prompt
510
-
511
- if image.shape[1] == 4:
512
- init_latents = image
513
-
514
- else:
515
- if isinstance(generator, list) and len(generator) != batch_size:
516
- raise ValueError(
517
- f"You have passed a list of generators of length {len(generator)}, but requested an effective"
518
- f" batch size of {batch_size}. Make sure the batch size matches the length of the generators."
519
- )
520
-
521
- elif isinstance(generator, list):
522
- init_latents = [
523
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
524
- ]
525
- init_latents = torch.cat(init_latents, dim=0)
526
- else:
527
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
528
-
529
- init_latents = self.vae.config.scaling_factor * init_latents
530
-
531
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
532
- # expand init_latents for batch_size
533
- deprecation_message = (
534
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
535
- " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
536
- " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
537
- " your script to pass as many initial images as text prompts to suppress this warning."
538
- )
539
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
540
- additional_image_per_prompt = batch_size // init_latents.shape[0]
541
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
542
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
543
- raise ValueError(
544
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
545
- )
546
- else:
547
- init_latents = torch.cat([init_latents], dim=0)
548
-
549
- shape = init_latents.shape
550
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
551
-
552
- # get latents
553
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
554
- latents = init_latents
555
-
556
- return latents
557
-
558
- @torch.no_grad()
559
- @replace_example_docstring(EXAMPLE_DOC_STRING)
560
- def __call__(
561
- self,
562
- prompt: Union[str, List[str]] = None,
563
- image: Union[
564
- torch.FloatTensor,
565
- PIL.Image.Image,
566
- np.ndarray,
567
- List[torch.FloatTensor],
568
- List[PIL.Image.Image],
569
- List[np.ndarray],
570
- ] = None,
571
- strength: float = 0.8,
572
- num_inference_steps: Optional[int] = 50,
573
- guidance_scale: Optional[float] = 7.5,
574
- negative_prompt: Optional[Union[str, List[str]]] = None,
575
- num_images_per_prompt: Optional[int] = 1,
576
- eta: Optional[float] = 0.0,
577
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
578
- prompt_embeds: Optional[torch.FloatTensor] = None,
579
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
580
- output_type: Optional[str] = "pil",
581
- return_dict: bool = True,
582
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
583
- callback_steps: int = 1,
584
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
585
- ):
586
- r"""
587
- The call function to the pipeline for generation.
588
-
589
- Args:
590
- prompt (`str` or `List[str]`, *optional*):
591
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
592
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
593
- `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
594
- latents as `image`, but if passing latents directly it is not encoded again.
595
- strength (`float`, *optional*, defaults to 0.8):
596
- Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
597
- starting point and more noise is added the higher the `strength`. The number of denoising steps depends
598
- on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
599
- process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
600
- essentially ignores `image`.
601
- num_inference_steps (`int`, *optional*, defaults to 50):
602
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
603
- expense of slower inference. This parameter is modulated by `strength`.
604
- guidance_scale (`float`, *optional*, defaults to 7.5):
605
- A higher guidance scale value encourages the model to generate images closely linked to the text
606
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
607
- negative_prompt (`str` or `List[str]`, *optional*):
608
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
609
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
610
- num_images_per_prompt (`int`, *optional*, defaults to 1):
611
- The number of images to generate per prompt.
612
- eta (`float`, *optional*, defaults to 0.0):
613
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
614
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
615
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
616
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
617
- generation deterministic.
618
- prompt_embeds (`torch.FloatTensor`, *optional*):
619
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
620
- provided, text embeddings are generated from the `prompt` input argument.
621
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
622
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
623
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
624
- output_type (`str`, *optional*, defaults to `"pil"`):
625
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
626
- return_dict (`bool`, *optional*, defaults to `True`):
627
- Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a
628
- plain tuple.
629
- callback (`Callable`, *optional*):
630
- A function that is called every `callback_steps` steps during inference. The function is called with the
631
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
632
- callback_steps (`int`, *optional*, defaults to 1):
633
- The frequency at which the `callback` function is called. If not specified, the callback is called at
634
- every step.
635
- cross_attention_kwargs (`dict`, *optional*):
636
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
637
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
638
-
639
- Examples:
640
-
641
- Returns:
642
- [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`:
643
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned,
644
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
645
- second element is a list of `bool`s indicating whether the corresponding generated image contains
646
- "not-safe-for-work" (nsfw) content.
647
- """
648
- # 1. Check inputs. Raise error if not correct
649
- self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
650
-
651
- # 2. Define call parameters
652
- if prompt is not None and isinstance(prompt, str):
653
- batch_size = 1
654
- elif prompt is not None and isinstance(prompt, list):
655
- batch_size = len(prompt)
656
- else:
657
- batch_size = prompt_embeds.shape[0]
658
- device = self._execution_device
659
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
660
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
661
- # corresponds to doing no classifier free guidance.
662
- do_classifier_free_guidance = guidance_scale > 1.0
663
-
664
- # 3. Encode input prompt
665
- text_encoder_lora_scale = (
666
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
667
- )
668
- prompt_embeds = self._encode_prompt(
669
- prompt,
670
- device,
671
- num_images_per_prompt,
672
- do_classifier_free_guidance,
673
- negative_prompt,
674
- prompt_embeds=prompt_embeds,
675
- negative_prompt_embeds=negative_prompt_embeds,
676
- lora_scale=text_encoder_lora_scale,
677
- )
678
-
679
- # 4. Preprocess image
680
- image = self.image_processor.preprocess(image)
681
-
682
- # 5. set timesteps
683
- self.scheduler.set_timesteps(num_inference_steps, device=device)
684
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
685
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
686
-
687
- # 6. Prepare latent variables
688
- latents = self.prepare_latents(
689
- image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
690
- )
691
-
692
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
693
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
694
-
695
- # 8. Denoising loop
696
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
697
- with self.progress_bar(total=num_inference_steps) as progress_bar:
698
- for i, t in enumerate(timesteps):
699
- # expand the latents if we are doing classifier free guidance
700
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
701
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
702
-
703
- # predict the noise residual
704
- noise_pred = self.unet(
705
- latent_model_input,
706
- t,
707
- encoder_hidden_states=prompt_embeds,
708
- cross_attention_kwargs=cross_attention_kwargs,
709
- return_dict=False,
710
- )[0]
711
-
712
- # perform guidance
713
- if do_classifier_free_guidance:
714
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
715
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
716
-
717
- # compute the previous noisy sample x_t -> x_t-1
718
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
719
-
720
- # call the callback, if provided
721
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
722
- progress_bar.update()
723
- if callback is not None and i % callback_steps == 0:
724
- callback(i, t, latents)
725
-
726
- if not output_type == "latent":
727
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
728
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
729
- else:
730
- image = latents
731
- has_nsfw_concept = None
732
-
733
- if has_nsfw_concept is None:
734
- do_denormalize = [True] * image.shape[0]
735
- else:
736
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
737
-
738
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
739
-
740
- # Offload last model to CPU
741
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
742
- self.final_offload_hook.offload()
743
-
744
- if not return_dict:
745
- return (image, has_nsfw_concept)
746
-
747
- return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
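For context, a minimal usage sketch of the image-to-image call path removed above, assuming the `diffusers` `AltDiffusionImg2ImgPipeline` API that this file mirrored; the model id, image URL and output file name are placeholders, not values taken from the repository:

```python
# Illustrative sketch only; model id, image URL and output path are placeholders.
import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
    "BAAI/AltDiffusion-m9", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/sketch.png").resize((512, 512))
result = pipe(
    prompt="a fantasy landscape, highly detailed",
    image=init_image,
    strength=0.8,            # how much noise to add on top of the init image
    guidance_scale=7.5,      # classifier-free guidance weight (enabled when > 1)
    num_inference_steps=50,  # effective steps are scaled down by `strength`
)
result.images[0].save("fantasy_landscape.png")
```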
spaces/Andy1621/uniformer_image_detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://res2net101_v1d_26w_4s',
4
- backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26))
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/gcnet_r50-d8.py',
3
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_40k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(align_corners=True),
8
- auxiliary_head=dict(align_corners=True),
9
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
spaces/Andy1621/uniformer_video_demo/app.py DELETED
@@ -1,127 +0,0 @@
1
- import os
2
-
3
- import torch
4
- import numpy as np
5
- import torch.nn.functional as F
6
- import torchvision.transforms as T
7
- from PIL import Image
8
- from decord import VideoReader
9
- from decord import cpu
10
- from uniformer import uniformer_small
11
- from kinetics_class_index import kinetics_classnames
12
- from transforms import (
13
- GroupNormalize, GroupScale, GroupCenterCrop,
14
- Stack, ToTorchFormatTensor
15
- )
16
-
17
- import gradio as gr
18
- from huggingface_hub import hf_hub_download
19
-
20
- # Device on which to run the model
21
- # Set to cuda to load on GPU
22
- device = "cpu"
23
- # os.system("wget https://cdn-lfs.huggingface.co/Andy1621/uniformer/d5fd7b0c49ee6a5422ef5d0c884d962c742003bfbd900747485eb99fa269d0db")
24
- model_path = hf_hub_download(repo_id="Andy1621/uniformer", filename="uniformer_small_k400_16x8.pth")
25
- # Pick a pretrained model
26
- model = uniformer_small()
27
- # state_dict = torch.load('d5fd7b0c49ee6a5422ef5d0c884d962c742003bfbd900747485eb99fa269d0db', map_location='cpu')
28
- state_dict = torch.load(model_path, map_location='cpu')
29
- model.load_state_dict(state_dict)
30
-
31
- # Set to eval mode and move to desired device
32
- model = model.to(device)
33
- model = model.eval()
34
-
35
- # Create an id to label name mapping
36
- kinetics_id_to_classname = {}
37
- for k, v in kinetics_classnames.items():
38
- kinetics_id_to_classname[k] = v
39
-
40
-
41
- def get_index(num_frames, num_segments=16, dense_sample_rate=8):
42
- sample_range = num_segments * dense_sample_rate
43
- sample_pos = max(1, 1 + num_frames - sample_range)
44
- t_stride = dense_sample_rate
45
- start_idx = 0 if sample_pos == 1 else sample_pos // 2
46
- offsets = np.array([
47
- (idx * t_stride + start_idx) %
48
- num_frames for idx in range(num_segments)
49
- ])
50
- return offsets + 1
51
-
52
-
53
- def load_video(video_path):
54
- vr = VideoReader(video_path, ctx=cpu(0))
55
- num_frames = len(vr)
56
- frame_indices = get_index(num_frames, 16, 16)
57
-
58
- # transform
59
- crop_size = 224
60
- scale_size = 256
61
- input_mean = [0.485, 0.456, 0.406]
62
- input_std = [0.229, 0.224, 0.225]
63
-
64
- transform = T.Compose([
65
- GroupScale(int(scale_size)),
66
- GroupCenterCrop(crop_size),
67
- Stack(),
68
- ToTorchFormatTensor(),
69
- GroupNormalize(input_mean, input_std)
70
- ])
71
-
72
- images_group = list()
73
- for frame_index in frame_indices:
74
- img = Image.fromarray(vr[frame_index].asnumpy())
75
- images_group.append(img)
76
- torch_imgs = transform(images_group)
77
- return torch_imgs
78
-
79
-
80
- def inference(video):
81
- vid = load_video(video)
82
-
83
- # The model expects inputs of shape: B x C x T x H x W
84
- TC, H, W = vid.shape
85
- inputs = vid.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4)
86
-
87
- prediction = model(inputs)
88
- prediction = F.softmax(prediction, dim=1).flatten()
89
-
90
- return {kinetics_id_to_classname[str(i)]: float(prediction[i]) for i in range(400)}
91
-
92
-
93
- def set_example_video(example: list) -> dict:
94
- return gr.Video.update(value=example[0])
95
-
96
-
97
- demo = gr.Blocks()
98
- with demo:
99
- gr.Markdown(
100
- """
101
- # UniFormer-S
102
- Gradio demo for <a href='https://github.com/Sense-X/UniFormer' target='_blank'>UniFormer</a>: To use it, simply upload your video, or click one of the examples to load them. Read more at the links below.
103
- """
104
- )
105
-
106
- with gr.Box():
107
- with gr.Row():
108
- with gr.Column():
109
- with gr.Row():
110
- input_video = gr.Video(label='Input Video')
111
- with gr.Row():
112
- submit_button = gr.Button('Submit')
113
- with gr.Column():
114
- label = gr.Label(num_top_classes=5)
115
- with gr.Row():
116
- example_videos = gr.Dataset(components=[input_video], samples=[['hitting_baseball.mp4'], ['hoverboarding.mp4'], ['yoga.mp4']])
117
-
118
- gr.Markdown(
119
- """
120
- <p style='text-align: center'><a href='https://arxiv.org/abs/2201.04676' target='_blank'>[ICLR2022] UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>
121
- """
122
- )
123
-
124
- submit_button.click(fn=inference, inputs=input_video, outputs=label)
125
- example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)
126
-
127
- demo.launch(enable_queue=True)
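The dense frame sampling in `get_index` above is easiest to see on concrete numbers. A small, self-contained sketch (the 300-frame clip length is made up for illustration):

```python
import numpy as np

def get_index(num_frames, num_segments=16, dense_sample_rate=8):
    # Same logic as the deleted app.py above: take `num_segments` frames,
    # `dense_sample_rate` apart, starting from the middle of the unused slack.
    sample_range = num_segments * dense_sample_rate
    sample_pos = max(1, 1 + num_frames - sample_range)
    start_idx = 0 if sample_pos == 1 else sample_pos // 2
    offsets = np.array([(i * dense_sample_rate + start_idx) % num_frames
                        for i in range(num_segments)])
    return offsets + 1

# 16 indices, 16 frames apart, roughly centred in a 300-frame clip.
print(get_index(300, 16, 16))
```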
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/monkey_patch_gptq_lora.py DELETED
@@ -1,39 +0,0 @@
1
- # Copied from https://github.com/johnsmith0031/alpaca_lora_4bit
2
-
3
- from pathlib import Path
4
-
5
- import alpaca_lora_4bit.autograd_4bit as autograd_4bit
6
- from alpaca_lora_4bit.amp_wrapper import AMPWrapper
7
- from alpaca_lora_4bit.autograd_4bit import (
8
- Autograd4bitQuantLinear,
9
- load_llama_model_4bit_low_ram
10
- )
11
- from alpaca_lora_4bit.models import Linear4bitLt
12
- from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
13
- replace_peft_model_with_int4_lora_model
14
- )
15
-
16
- from modules import shared
17
- from modules.GPTQ_loader import find_quantized_model_file
18
-
19
- replace_peft_model_with_int4_lora_model()
20
-
21
-
22
- def load_model_llama(model_name):
23
- config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
24
- model_path = str(find_quantized_model_file(model_name))
25
- model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
26
- for _, m in model.named_modules():
27
- if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
28
- if m.is_v1_model:
29
- m.zeros = m.zeros.half()
30
- m.scales = m.scales.half()
31
- m.bias = m.bias.half()
32
-
33
- autograd_4bit.auto_switch = True
34
-
35
- model.half()
36
- wrapper = AMPWrapper(model)
37
- wrapper.apply_generate()
38
-
39
- return model, tokenizer
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/utils.py DELETED
@@ -1,189 +0,0 @@
1
- """Utils for monoDepth."""
2
- import sys
3
- import re
4
- import numpy as np
5
- import cv2
6
- import torch
7
-
8
-
9
- def read_pfm(path):
10
- """Read pfm file.
11
-
12
- Args:
13
- path (str): path to file
14
-
15
- Returns:
16
- tuple: (data, scale)
17
- """
18
- with open(path, "rb") as file:
19
-
20
- color = None
21
- width = None
22
- height = None
23
- scale = None
24
- endian = None
25
-
26
- header = file.readline().rstrip()
27
- if header.decode("ascii") == "PF":
28
- color = True
29
- elif header.decode("ascii") == "Pf":
30
- color = False
31
- else:
32
- raise Exception("Not a PFM file: " + path)
33
-
34
- dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
35
- if dim_match:
36
- width, height = list(map(int, dim_match.groups()))
37
- else:
38
- raise Exception("Malformed PFM header.")
39
-
40
- scale = float(file.readline().decode("ascii").rstrip())
41
- if scale < 0:
42
- # little-endian
43
- endian = "<"
44
- scale = -scale
45
- else:
46
- # big-endian
47
- endian = ">"
48
-
49
- data = np.fromfile(file, endian + "f")
50
- shape = (height, width, 3) if color else (height, width)
51
-
52
- data = np.reshape(data, shape)
53
- data = np.flipud(data)
54
-
55
- return data, scale
56
-
57
-
58
- def write_pfm(path, image, scale=1):
59
- """Write pfm file.
60
-
61
- Args:
62
- path (str): pathto file
63
- image (array): data
64
- scale (int, optional): Scale. Defaults to 1.
65
- """
66
-
67
- with open(path, "wb") as file:
68
- color = None
69
-
70
- if image.dtype.name != "float32":
71
- raise Exception("Image dtype must be float32.")
72
-
73
- image = np.flipud(image)
74
-
75
- if len(image.shape) == 3 and image.shape[2] == 3: # color image
76
- color = True
77
- elif (
78
- len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
79
- ): # greyscale
80
- color = False
81
- else:
82
- raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
83
-
84
- file.write("PF\n" if color else "Pf\n".encode())
85
- file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
86
-
87
- endian = image.dtype.byteorder
88
-
89
- if endian == "<" or endian == "=" and sys.byteorder == "little":
90
- scale = -scale
91
-
92
- file.write("%f\n".encode() % scale)
93
-
94
- image.tofile(file)
95
-
96
-
97
- def read_image(path):
98
- """Read image and output RGB image (0-1).
99
-
100
- Args:
101
- path (str): path to file
102
-
103
- Returns:
104
- array: RGB image (0-1)
105
- """
106
- img = cv2.imread(path)
107
-
108
- if img.ndim == 2:
109
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
110
-
111
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
112
-
113
- return img
114
-
115
-
116
- def resize_image(img):
117
- """Resize image and make it fit for network.
118
-
119
- Args:
120
- img (array): image
121
-
122
- Returns:
123
- tensor: data ready for network
124
- """
125
- height_orig = img.shape[0]
126
- width_orig = img.shape[1]
127
-
128
- if width_orig > height_orig:
129
- scale = width_orig / 384
130
- else:
131
- scale = height_orig / 384
132
-
133
- height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
134
- width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
135
-
136
- img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
137
-
138
- img_resized = (
139
- torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
140
- )
141
- img_resized = img_resized.unsqueeze(0)
142
-
143
- return img_resized
144
-
145
-
146
- def resize_depth(depth, width, height):
147
- """Resize depth map and bring to CPU (numpy).
148
-
149
- Args:
150
- depth (tensor): depth
151
- width (int): image width
152
- height (int): image height
153
-
154
- Returns:
155
- array: processed depth
156
- """
157
- depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
158
-
159
- depth_resized = cv2.resize(
160
- depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
161
- )
162
-
163
- return depth_resized
164
-
165
- def write_depth(path, depth, bits=1):
166
- """Write depth map to pfm and png file.
167
-
168
- Args:
169
- path (str): filepath without extension
170
- depth (array): depth
171
- """
172
- write_pfm(path + ".pfm", depth.astype(np.float32))
173
-
174
- depth_min = depth.min()
175
- depth_max = depth.max()
176
-
177
- max_val = (2**(8*bits))-1
178
-
179
- if depth_max - depth_min > np.finfo("float").eps:
180
- out = max_val * (depth - depth_min) / (depth_max - depth_min)
181
- else:
182
- out = np.zeros(depth.shape, dtype=depth.type)
183
-
184
- if bits == 1:
185
- cv2.imwrite(path + ".png", out.astype("uint8"))
186
- elif bits == 2:
187
- cv2.imwrite(path + ".png", out.astype("uint16"))
188
-
189
- return
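As a quick check of the scaling that `write_depth` performs, a self-contained sketch on a synthetic depth map (the shape and value range are arbitrary and only serve the illustration):

```python
import numpy as np

# Reproduce write_depth's 16-bit normalisation (bits=2) on a fake depth map.
depth = (np.random.rand(240, 320) * 10.0).astype(np.float32)

depth_min, depth_max = depth.min(), depth.max()
max_val = (2 ** (8 * 2)) - 1                                  # 65535 for 16-bit PNG output
out = max_val * (depth - depth_min) / (depth_max - depth_min) # scale into [0, 65535]

print(out.astype("uint16").min(), out.astype("uint16").max())
```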
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/README.md DELETED
@@ -1,174 +0,0 @@
1
- ---
2
- license: mit
3
- sdk: gradio
4
- emoji: 😻
5
- colorTo: green
6
- pinned: true
7
- ---
8
- <div align="center">
9
- <img src="./.asset/grounding_dino_logo.png" width="30%">
10
- </div>
11
-
12
- # :sauropod: Grounding DINO
13
-
14
- [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/zero-shot-object-detection-on-mscoco)](https://paperswithcode.com/sota/zero-shot-object-detection-on-mscoco?p=grounding-dino-marrying-dino-with-grounded) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/zero-shot-object-detection-on-odinw)](https://paperswithcode.com/sota/zero-shot-object-detection-on-odinw?p=grounding-dino-marrying-dino-with-grounded) \
15
- [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/object-detection-on-coco-minival)](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=grounding-dino-marrying-dino-with-grounded) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/object-detection-on-coco)](https://paperswithcode.com/sota/object-detection-on-coco?p=grounding-dino-marrying-dino-with-grounded)
16
-
17
-
18
- **[IDEA-CVR, IDEA-Research](https://github.com/IDEA-Research)**
19
-
20
- [Shilong Liu](http://www.lsl.zone/), [Zhaoyang Zeng](https://scholar.google.com/citations?user=U_cvvUwAAAAJ&hl=zh-CN&oi=ao), [Tianhe Ren](https://rentainhe.github.io/), [Feng Li](https://scholar.google.com/citations?user=ybRe9GcAAAAJ&hl=zh-CN), [Hao Zhang](https://scholar.google.com/citations?user=B8hPxMQAAAAJ&hl=zh-CN), [Jie Yang](https://github.com/yangjie-cv), [Chunyuan Li](https://scholar.google.com/citations?user=Zd7WmXUAAAAJ&hl=zh-CN&oi=ao), [Jianwei Yang](https://jwyang.github.io/), [Hang Su](https://scholar.google.com/citations?hl=en&user=dxN1_X0AAAAJ&view_op=list_works&sortby=pubdate), [Jun Zhu](https://scholar.google.com/citations?hl=en&user=axsP38wAAAAJ), [Lei Zhang](https://www.leizhang.org/)<sup>:email:</sup>.
21
-
22
-
23
- [[`Paper`](https://arxiv.org/abs/2303.05499)] [[`Demo`](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)] [[`BibTex`](#black_nib-citation)]
24
-
25
-
26
- PyTorch implementation and pretrained models for Grounding DINO. For details, see the paper **[Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499)**.
27
-
28
- ## :sun_with_face: Helpful Tutorial
29
-
30
- - :grapes: [[Read our arXiv Paper](https://arxiv.org/abs/2303.05499)]
31
- - :apple: [[Watch our simple introduction video on YouTube](https://youtu.be/wxWDt5UiwY8)]
32
- - :blossom: &nbsp;[[Try the Colab Demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb)]
33
- - :sunflower: [[Try our Official Huggingface Demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)]
34
- - :maple_leaf: [[Watch the Step by Step Tutorial about GroundingDINO by Roboflow AI](https://youtu.be/cMa77r3YrDk)]
35
- - :mushroom: [[GroundingDINO: Automated Dataset Annotation and Evaluation by Roboflow AI](https://youtu.be/C4NqaRBz_Kw)]
36
- - :hibiscus: [[Accelerate Image Annotation with SAM and GroundingDINO by Roboflow AI](https://youtu.be/oEQYStnF2l8)]
37
- - :white_flower: [[Autodistill: Train YOLOv8 with ZERO Annotations based on Grounding-DINO and Grounded-SAM by Roboflow AI](https://github.com/autodistill/autodistill)]
38
-
39
- <!-- Grounding DINO Methods |
40
- [![arXiv](https://img.shields.io/badge/arXiv-2303.05499-b31b1b.svg)](https://arxiv.org/abs/2303.05499)
41
- [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/wxWDt5UiwY8) -->
42
-
43
- <!-- Grounding DINO Demos |
44
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) -->
45
- <!-- [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/cMa77r3YrDk)
46
- [![HuggingFace space](https://img.shields.io/badge/🤗-HuggingFace%20Space-cyan.svg)](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)
47
- [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/oEQYStnF2l8)
48
- [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/C4NqaRBz_Kw) -->
49
-
50
- ## :sparkles: Highlight Projects
51
-
52
- - [Semantic-SAM: a universal image segmentation model to enable segment and recognize anything at any desired granularity.](https://github.com/UX-Decoder/Semantic-SAM),
53
- - [DetGPT: Detect What You Need via Reasoning](https://github.com/OptimalScale/DetGPT)
54
- - [Grounded-SAM: Marrying Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)
55
- - [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb)
56
- - [Grounding DINO with GLIGEN for Controllable Image Editing](demo/image_editing_with_groundingdino_gligen.ipynb)
57
- - [OpenSeeD: A Simple and Strong Openset Segmentation Model](https://github.com/IDEA-Research/OpenSeeD)
58
- - [SEEM: Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once)
59
- - [X-GPT: Conversational Visual Agent supported by X-Decoder](https://github.com/microsoft/X-Decoder/tree/xgpt)
60
- - [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://github.com/gligen/GLIGEN)
61
- - [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA)
62
-
63
- <!-- Extensions | [Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything); [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb); [Grounding DINO with GLIGEN](demo/image_editing_with_groundingdino_gligen.ipynb) -->
64
-
65
-
66
-
67
- <!-- Official PyTorch implementation of [Grounding DINO](https://arxiv.org/abs/2303.05499), a stronger open-set object detector. Code is available now! -->
68
-
69
-
70
- ## :bulb: Highlight
71
-
72
- - **Open-Set Detection.** Detect **everything** with language!
73
- - **High Performancce.** COCO zero-shot **52.5 AP** (training without COCO data!). COCO fine-tune **63.0 AP**.
74
- - **Flexible.** Collaboration with Stable Diffusion for Image Editting.
75
-
76
-
77
-
78
-
79
- ## :fire: News
80
- - **`2023/07/18`**: We release [Semantic-SAM](https://github.com/UX-Decoder/Semantic-SAM), a universal image segmentation model to enable segment and recognize anything at any desired granularity. **Code** and **checkpoint** are available!
81
- - **`2023/06/17`**: We provide an example to evaluate Grounding DINO on COCO zero-shot performance.
82
- - **`2023/04/15`**: Refer to [CV in the Wild Readings](https://github.com/Computer-Vision-in-the-Wild/CVinW_Readings) for those who are interested in open-set recognition!
83
- - **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editings.
84
- - **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editings.
85
- - **`2023/04/06`**: We build a new demo by marrying GroundingDINO with [Segment-Anything](https://github.com/facebookresearch/segment-anything) named **[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)** aims to support segmentation in GroundingDINO.
86
- - **`2023/03/28`**: A YouTube [video](https://youtu.be/cMa77r3YrDk) about Grounding DINO and basic object detection prompt engineering. [[SkalskiP](https://github.com/SkalskiP)]
87
- - **`2023/03/28`**: Add a [demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo) on Hugging Face Space!
88
- - **`2023/03/27`**: Support CPU-only mode. Now the model can run on machines without GPUs.
89
- - **`2023/03/25`**: A [demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) for Grounding DINO is available at Colab. [[SkalskiP](https://github.com/SkalskiP)]
90
- - **`2023/03/22`**: Code is available Now!
91
-
92
- <details open>
93
- <summary><font size="4">
94
- Description
95
- </font></summary>
96
- <a href="https://arxiv.org/abs/2303.05499">Paper</a> introduction.
97
- <img src=".asset/hero_figure.png" alt="ODinW" width="100%">
98
- Marrying <a href="https://github.com/IDEA-Research/GroundingDINO">Grounding DINO</a> and <a href="https://github.com/gligen/GLIGEN">GLIGEN</a>
99
- <img src="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/GD_GLIGEN.png" alt="gd_gligen" width="100%">
100
- </details>
101
-
102
- ## :star: Explanations/Tips for Grounding DINO Inputs and Outputs
103
- - Grounding DINO accepts an `(image, text)` pair as inputs.
104
- - It outputs `900` (by default) object boxes. Each box has similarity scores across all input words. (as shown in Figures below.)
105
- - We defaultly choose the boxes whose highest similarities are higher than a `box_threshold`.
106
- - We extract the words whose similarities are higher than the `text_threshold` as predicted labels.
107
- - If you want to obtain objects of specific phrases, like the `dogs` in the sentence `two dogs with a stick.`, you can select the boxes with highest text similarities with `dogs` as final outputs.
108
- - Note that each word can be split to **more than one** tokens with different tokenlizers. The number of words in a sentence may not equal to the number of text tokens.
109
- - We suggest separating different category names with `.` for Grounding DINO.
110
- ![model_explain1](.asset/model_explan1.PNG)
111
- ![model_explain2](.asset/model_explan2.PNG)
112
-
113
-
114
- ## :medal_military: Results
115
-
116
- <details open>
117
- <summary><font size="4">
118
- COCO Object Detection Results
119
- </font></summary>
120
- <img src=".asset/COCO.png" alt="COCO" width="100%">
121
- </details>
122
-
123
- <details open>
124
- <summary><font size="4">
125
- ODinW Object Detection Results
126
- </font></summary>
127
- <img src=".asset/ODinW.png" alt="ODinW" width="100%">
128
- </details>
129
-
130
- <details open>
131
- <summary><font size="4">
132
- Marrying Grounding DINO with <a href="https://github.com/Stability-AI/StableDiffusion">Stable Diffusion</a> for Image Editing
133
- </font></summary>
134
- See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_stablediffusion.ipynb">notebook</a> for more details.
135
- <img src=".asset/GD_SD.png" alt="GD_SD" width="100%">
136
- </details>
137
-
138
-
139
- <details open>
140
- <summary><font size="4">
141
- Marrying Grounding DINO with <a href="https://github.com/gligen/GLIGEN">GLIGEN</a> for more Detailed Image Editing.
142
- </font></summary>
143
- See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_gligen.ipynb">notebook</a> for more details.
144
- <img src=".asset/GD_GLIGEN.png" alt="GD_GLIGEN" width="100%">
145
- </details>
146
-
147
- ## :sauropod: Model: Grounding DINO
148
-
149
- Includes: a text backbone, an image backbone, a feature enhancer, a language-guided query selection, and a cross-modality decoder.
150
-
151
- ![arch](.asset/arch.png)
152
-
153
-
154
- ## :hearts: Acknowledgement
155
-
156
- Our model is related to [DINO](https://github.com/IDEA-Research/DINO) and [GLIP](https://github.com/microsoft/GLIP). Thanks for their great work!
157
-
158
- We also thank great previous work including DETR, Deformable DETR, SMCA, Conditional DETR, Anchor DETR, Dynamic DETR, DAB-DETR, DN-DETR, etc. More related work are available at [Awesome Detection Transformer](https://github.com/IDEACVR/awesome-detection-transformer). A new toolbox [detrex](https://github.com/IDEA-Research/detrex) is available as well.
159
-
160
- Thanks [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) and [GLIGEN](https://github.com/gligen/GLIGEN) for their awesome models.
161
-
162
-
163
- ## :black_nib: Citation
164
-
165
- If you find our work helpful for your research, please consider citing the following BibTeX entry.
166
-
167
- ```bibtex
168
- @article{liu2023grounding,
169
- title={Grounding dino: Marrying dino with grounded pre-training for open-set object detection},
170
- author={Liu, Shilong and Zeng, Zhaoyang and Ren, Tianhe and Li, Feng and Zhang, Hao and Yang, Jie and Li, Chunyuan and Yang, Jianwei and Su, Hang and Zhu, Jun and others},
171
- journal={arXiv preprint arXiv:2303.05499},
172
- year={2023}
173
- }
174
- ```
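To make the box/text thresholding described in the tips section concrete, a small sketch on stand-in tensors; the similarity matrix and boxes here are random placeholders, not real Grounding DINO outputs, and the threshold values are only examples:

```python
import torch

torch.manual_seed(0)
logits = torch.randn(900, 12)   # stand-in for per-token similarity logits (900 query boxes)
boxes = torch.rand(900, 4)      # stand-in for normalised (cx, cy, w, h) boxes

box_threshold, text_threshold = 0.35, 0.25

scores = logits.sigmoid()                           # per-token similarities in [0, 1]
keep = scores.max(dim=1).values > box_threshold     # keep boxes whose best token is confident
kept_scores, kept_boxes = scores[keep], boxes[keep]

# For each kept box, the tokens above text_threshold form its predicted label.
for row, box in zip(kept_scores[:5], kept_boxes[:5]):
    token_ids = (row > text_threshold).nonzero(as_tuple=True)[0]
    print([round(v, 3) for v in box.tolist()], token_ids.tolist())
```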
spaces/Ash2219/AIchatbot/app.py DELETED
@@ -1,164 +0,0 @@
1
- import os
2
- import re
3
- import requests
4
- import json
5
- import gradio as gr
6
- from langchain.chat_models import ChatOpenAI
7
- from langchain import LLMChain, PromptTemplate
8
- from langchain.memory import ConversationBufferMemory
9
-
10
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
11
- PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY')
12
- PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID')
13
-
14
- PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID')
15
- play_ht_api_get_audio_url = "https://play.ht/api/v2/tts"
16
-
17
-
18
- template = """You are a helpful assistant to answer user queries.
19
- {chat_history}
20
- User: {user_message}
21
- Chatbot:"""
22
-
23
- prompt = PromptTemplate(
24
- input_variables=["chat_history", "user_message"], template=template
25
- )
26
-
27
- memory = ConversationBufferMemory(memory_key="chat_history")
28
-
29
- llm_chain = LLMChain(
30
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
31
- prompt=prompt,
32
- verbose=True,
33
- memory=memory,
34
- )
35
-
36
- headers = {
37
- "accept": "text/event-stream",
38
- "content-type": "application/json",
39
- "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY,
40
- "X-USER-ID": PLAY_HT_USER_ID
41
- }
42
-
43
-
44
- def get_payload(text):
45
- return {
46
- "text": text,
47
- "voice": PLAY_HT_VOICE_ID,
48
- "quality": "medium",
49
- "output_format": "mp3",
50
- "speed": 1,
51
- "sample_rate": 24000,
52
- "seed": None,
53
- "temperature": None
54
- }
55
-
56
- def get_generated_audio(text):
57
- payload = get_payload(text)
58
- generated_response = {}
59
- try:
60
- response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers)
61
- response.raise_for_status()
62
- generated_response["type"]= 'SUCCESS'
63
- generated_response["response"] = response.text
64
- except requests.exceptions.RequestException as e:
65
- generated_response["type"]= 'ERROR'
66
- try:
67
- response_text = json.loads(response.text)
68
- if response_text['error_message']:
69
- generated_response["response"] = response_text['error_message']
70
- else:
71
- generated_response["response"] = response.text
72
- except Exception as e:
73
- generated_response["response"] = response.text
74
- except Exception as e:
75
- generated_response["type"]= 'ERROR'
76
- generated_response["response"] = response.text
77
- return generated_response
78
-
79
- def extract_urls(text):
80
- # Define the regex pattern for URLs
81
- url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'
82
-
83
- # Find all occurrences of URLs in the text
84
- urls = re.findall(url_pattern, text)
85
-
86
- return urls
87
-
88
- def get_audio_reply_for_question(text):
89
- generated_audio_event = get_generated_audio(text)
90
- # get_generated_audio returns a stream of events as a string; extract the audio url from it
91
- final_response = {
92
- "audio_url": '',
93
- "message": ''
94
- }
95
- if generated_audio_event["type"] == 'SUCCESS':
96
- audio_urls = extract_urls(generated_audio_event["response"])
97
- if len(audio_urls) == 0:
98
- final_response['message'] = "No audio file link found in generated event"
99
- else:
100
- final_response['audio_url'] = audio_urls[-1]
101
- else:
102
- final_response['message'] = generated_audio_event['response']
103
- return final_response
104
-
105
- def download_url(url):
106
- try:
107
- # Send a GET request to the URL to fetch the content
108
- final_response = {
109
- 'content':'',
110
- 'error':''
111
- }
112
- response = requests.get(url)
113
- # Check if the request was successful (status code 200)
114
- if response.status_code == 200:
115
- final_response['content'] = response.content
116
- else:
117
- final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}"
118
- except Exception as e:
119
- final_response['error'] = f"Failed to download the URL. Error: {e}"
120
- return final_response
121
-
122
- def get_filename_from_url(url):
123
- # Use os.path.basename() to extract the file name from the URL
124
- file_name = os.path.basename(url)
125
- return file_name
126
-
127
- def get_text_response(user_message):
128
- response = llm_chain.predict(user_message = user_message)
129
- return response
130
-
131
- def get_text_response_and_audio_response(user_message):
132
- response = get_text_response(user_message) # Getting the reply from Open AI
133
- audio_reply_for_question_response = get_audio_reply_for_question(response)
134
- final_response = {
135
- 'output_file_path': '',
136
- 'message':''
137
- }
138
- audio_url = audio_reply_for_question_response['audio_url']
139
- if audio_url:
140
- output_file_path=get_filename_from_url(audio_url)
141
- download_url_response = download_url(audio_url)
142
- audio_content = download_url_response['content']
143
- if audio_content:
144
- with open(output_file_path, "wb") as audio_file:
145
- audio_file.write(audio_content)
146
- final_response['output_file_path'] = output_file_path
147
- else:
148
- final_response['message'] = download_url_response['error']
149
- else:
150
- final_response['message'] = audio_reply_for_question_response['message']
151
- return final_response
152
-
153
- def chat_bot_response(message, history):
154
- text_and_audio_response = get_text_response_and_audio_response(message)
155
- output_file_path = text_and_audio_response['output_file_path']
156
- if output_file_path:
157
- return (text_and_audio_response['output_file_path'],)
158
- else:
159
- return text_and_audio_response['message']
160
-
161
- demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])
162
-
163
- if __name__ == "__main__":
164
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/control.py DELETED
@@ -1,225 +0,0 @@
1
- import sys
2
- import time
3
- from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union
4
-
5
- if sys.version_info >= (3, 8):
6
- from typing import Final
7
- else:
8
- from pip._vendor.typing_extensions import Final # pragma: no cover
9
-
10
- from .segment import ControlCode, ControlType, Segment
11
-
12
- if TYPE_CHECKING:
13
- from .console import Console, ConsoleOptions, RenderResult
14
-
15
- STRIP_CONTROL_CODES: Final = [
16
- 7, # Bell
17
- 8, # Backspace
18
- 11, # Vertical tab
19
- 12, # Form feed
20
- 13, # Carriage return
21
- ]
22
- _CONTROL_STRIP_TRANSLATE: Final = {
23
- _codepoint: None for _codepoint in STRIP_CONTROL_CODES
24
- }
25
-
26
- CONTROL_ESCAPE: Final = {
27
- 7: "\\a",
28
- 8: "\\b",
29
- 11: "\\v",
30
- 12: "\\f",
31
- 13: "\\r",
32
- }
33
-
34
- CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
35
- ControlType.BELL: lambda: "\x07",
36
- ControlType.CARRIAGE_RETURN: lambda: "\r",
37
- ControlType.HOME: lambda: "\x1b[H",
38
- ControlType.CLEAR: lambda: "\x1b[2J",
39
- ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
40
- ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
41
- ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
42
- ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
43
- ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
44
- ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
45
- ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
46
- ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
47
- ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
48
- ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
49
- ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
50
- ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
51
- }
52
-
53
-
54
- class Control:
55
- """A renderable that inserts a control code (non printable but may move cursor).
56
-
57
- Args:
58
- *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
59
- tuple of ControlType and an integer parameter
60
- """
61
-
62
- __slots__ = ["segment"]
63
-
64
- def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
65
- control_codes: List[ControlCode] = [
66
- (code,) if isinstance(code, ControlType) else code for code in codes
67
- ]
68
- _format_map = CONTROL_CODES_FORMAT
69
- rendered_codes = "".join(
70
- _format_map[code](*parameters) for code, *parameters in control_codes
71
- )
72
- self.segment = Segment(rendered_codes, None, control_codes)
73
-
74
- @classmethod
75
- def bell(cls) -> "Control":
76
- """Ring the 'bell'."""
77
- return cls(ControlType.BELL)
78
-
79
- @classmethod
80
- def home(cls) -> "Control":
81
- """Move cursor to 'home' position."""
82
- return cls(ControlType.HOME)
83
-
84
- @classmethod
85
- def move(cls, x: int = 0, y: int = 0) -> "Control":
86
- """Move cursor relative to current position.
87
-
88
- Args:
89
- x (int): X offset.
90
- y (int): Y offset.
91
-
92
- Returns:
93
- ~Control: Control object.
94
-
95
- """
96
-
97
- def get_codes() -> Iterable[ControlCode]:
98
- control = ControlType
99
- if x:
100
- yield (
101
- control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
102
- abs(x),
103
- )
104
- if y:
105
- yield (
106
- control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
107
- abs(y),
108
- )
109
-
110
- control = cls(*get_codes())
111
- return control
112
-
113
- @classmethod
114
- def move_to_column(cls, x: int, y: int = 0) -> "Control":
115
- """Move to the given column, optionally add offset to row.
116
-
117
- Returns:
118
- x (int): absolute x (column)
119
- y (int): optional y offset (row)
120
-
121
- Returns:
122
- ~Control: Control object.
123
- """
124
-
125
- return (
126
- cls(
127
- (ControlType.CURSOR_MOVE_TO_COLUMN, x),
128
- (
129
- ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
130
- abs(y),
131
- ),
132
- )
133
- if y
134
- else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
135
- )
136
-
137
- @classmethod
138
- def move_to(cls, x: int, y: int) -> "Control":
139
- """Move cursor to absolute position.
140
-
141
- Args:
142
- x (int): x offset (column)
143
- y (int): y offset (row)
144
-
145
- Returns:
146
- ~Control: Control object.
147
- """
148
- return cls((ControlType.CURSOR_MOVE_TO, x, y))
149
-
150
- @classmethod
151
- def clear(cls) -> "Control":
152
- """Clear the screen."""
153
- return cls(ControlType.CLEAR)
154
-
155
- @classmethod
156
- def show_cursor(cls, show: bool) -> "Control":
157
- """Show or hide the cursor."""
158
- return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
159
-
160
- @classmethod
161
- def alt_screen(cls, enable: bool) -> "Control":
162
- """Enable or disable alt screen."""
163
- if enable:
164
- return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
165
- else:
166
- return cls(ControlType.DISABLE_ALT_SCREEN)
167
-
168
- @classmethod
169
- def title(cls, title: str) -> "Control":
170
- """Set the terminal window title
171
-
172
- Args:
173
- title (str): The new terminal window title
174
- """
175
- return cls((ControlType.SET_WINDOW_TITLE, title))
176
-
177
- def __str__(self) -> str:
178
- return self.segment.text
179
-
180
- def __rich_console__(
181
- self, console: "Console", options: "ConsoleOptions"
182
- ) -> "RenderResult":
183
- if self.segment.text:
184
- yield self.segment
185
-
186
-
187
- def strip_control_codes(
188
- text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
189
- ) -> str:
190
- """Remove control codes from text.
191
-
192
- Args:
193
- text (str): A string possibly contain control codes.
194
-
195
- Returns:
196
- str: String with control codes removed.
197
- """
198
- return text.translate(_translate_table)
199
-
200
-
201
- def escape_control_codes(
202
- text: str,
203
- _translate_table: Dict[int, str] = CONTROL_ESCAPE,
204
- ) -> str:
205
- """Replace control codes with their "escaped" equivalent in the given text.
206
- (e.g. "\b" becomes "\\b")
207
-
208
- Args:
209
- text (str): A string possibly containing control codes.
210
-
211
- Returns:
212
- str: String with control codes replaced with their escaped version.
213
- """
214
- return text.translate(_translate_table)
215
-
216
-
217
- if __name__ == "__main__": # pragma: no cover
218
- from pip._vendor.rich.console import Console
219
-
220
- console = Console()
221
- console.print("Look at the title of your terminal window ^")
222
- # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
223
- for i in range(10):
224
- console.set_window_title("🚀 Loading" + "." * i)
225
- time.sleep(0.5)
 
spaces/Bart92/RVC_HF/Dockerfile DELETED
@@ -1,29 +0,0 @@
1
- # syntax=docker/dockerfile:1
2
-
3
- FROM python:3.10-bullseye
4
-
5
- EXPOSE 7865
6
-
7
- WORKDIR /app
8
-
9
- COPY . .
10
-
11
- RUN apt update && apt install -y -qq ffmpeg aria2 && apt clean
12
-
13
- RUN pip3 install --no-cache-dir -r requirements.txt
14
-
15
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
16
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
17
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
18
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
19
-
20
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
21
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
22
-
23
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
24
-
25
- RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/hubert -o rmvpe.pt
26
-
27
- VOLUME [ "/app/weights", "/app/opt" ]
28
-
29
- CMD ["python3", "infer-web.py"]
 
spaces/Benson/text-generation/Examples/Choque De Clanes Nulos.md DELETED
@@ -1,115 +0,0 @@
1
-
2
- <h1>Choque de clanes nulos: ¿Qué son y cómo usarlos</h1>
3
- <p>Clash of Clans es uno de los juegos móviles más populares del mundo, con millones de jugadores compitiendo por recursos, trofeos y gloria. ¿Pero sabías que hay algunos jugadores que no pertenecen a ningún clan? Se les llama nulos, y tienen sus propias ventajas y desventajas. En este artículo, explicaremos qué son los nulls, por qué la gente los usa y cómo puedes usarlos para tu beneficio. </p>
4
- <h2>Introducción</h2>
5
- <h3>¿Qué es el choque de clanes? </h3>
6
- <p>Clash of Clans es un juego de estrategia donde construyes tu propia aldea, entrenas a tu ejército y atacas las bases de otros jugadores. También puedes unirte o crear un clan, que es un grupo de jugadores que pueden chatear, donar tropas y participar en guerras de clanes. Las guerras de clanes son eventos especiales donde dos clanes se enfrentan en una serie de ataques, y el clan con más estrellas gana. Las estrellas se ganan destruyendo un cierto porcentaje de la base del enemigo. </p>
7
- <h2>choque de clanes nulos</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://bltlly.com/2v6MFt">https://bltlly.com/2v6MFt</a></b></p><br /><br />
8
- <h3>¿Qué son los nulos en Clash of Clans? </h3>
9
- <p>Los nulos son jugadores que no pertenecen a ningún clan. No tienen nombre de clan, ni insignia de clan, ni chat de clan. Todavía pueden atacar las bases de otros jugadores, pero no pueden participar en guerras de clanes o recibir donaciones de otros jugadores. Hay tres tipos de nulos: inactivos, prohibidos y abusados. </p>
10
- <h3>¿Por qué la gente usa nulos en Clash of Clans? </h3>
11
- <p>Hay diferentes razones por las que la gente usa nulos en Clash of Clans. Algunos los usan por diversión, algunos los usan para recursos agrícolas, algunos los usan para probar estrategias y otros los usan para hacer trampa. Aquí hay algunos ejemplos:</p>
12
- <ul>
13
- <li>Algunas personas usan nulls por diversión, porque les gusta jugar con tropas dominadas o experimentar con diferentes diseños de base. Por ejemplo, Null’s Clash es un servidor privado donde puedes tener gemas ilimitadas, oro, elixir y elixir oscuro. También puedes construir bases enormes y atacar a otros con las tropas que quieras. </li>
14
-
15
- <li>Algunas personas usan nulos para probar estrategias, porque pueden practicar sus ataques sin perder trofeos o recursos. Por ejemplo, pruebas de nulos son cuentas que se utilizan para simular diferentes escenarios y resultados. </li>
16
- <li>Algunas personas usan nulls para hacer trampa, porque pueden manipular el sistema de emparejamiento o explotar fallas. Por ejemplo, los nulos tramposos son cuentas que se utilizan para obtener ventajas injustas o sabotear a otros jugadores. </li>
17
- </ul>
18
- <h2>Tipos de nulos en Choque de clanes</h2>
19
- <h3>Nulos inactivos</h3>
20
- <p>Los nulos inactivos son cuentas que han sido abandonadas por sus propietarios. No han iniciado sesión durante mucho tiempo, y sus bases suelen estar desactualizadas y mal defendidas. Son objetivos fáciles para otros jugadores que quieren saquear sus recursos. </p>
21
- <h4>Pros y contras de los nulos inactivos</h4>
22
- <p>Los pros de los nulos inactivos son:</p>
23
- <ul>
24
- <li>Proporcionan una gran cantidad de <p>Los contras de los nulos inactivos son:</p>
25
- <ul>
26
- <li>Son aburridos para jugar, porque no tienen chat de clan, ni guerras de clan, ni donaciones. </li>
27
- <li>Son vulnerables a los ataques, porque no tienen escudo, ni guardia, ni tropas del castillo del clan. </li>
28
- <li>Son derrochadores, porque tienen recursos, edificios y tropas sin usar. </li>
29
- </ul>
30
- <h3>Nulos prohibidos</h3>
31
- <p>Los nulos prohibidos son cuentas que han sido suspendidas o terminadas por Supercell, el desarrollador de Clash of Clans. Han violado los términos de servicio o la política de juego limpio, y sus bases suelen estar marcadas con una bandera roja. Son inaccesibles para sus dueños y otros jugadores. </p>
32
- <p></p>
33
- <h4>Pros y contras de los nulos prohibidos</h4>
34
- <p>Los pros de los nulos prohibidos son:</p>
35
- <ul>
36
- <li>Sirven como una advertencia, porque muestran las consecuencias de hacer trampa o romper las reglas. </li>
37
- <li>Crean un entorno más justo, porque eliminan a los jugadores que tienen una ventaja injusta o un impacto negativo en el juego. </li>
38
- <li>Liberan espacio, porque reducen el número de cuentas en el juego. </li>
39
-
40
- <p>Los contras de los nulos prohibidos son:</p>
41
- <ul>
42
- <li>Son frustrantes, porque impiden a los propietarios acceder a sus cuentas o recuperar su progreso. </li>
43
- <li>Son injustos, porque pueden afectar a jugadores inocentes que han sido reportados o prohibidos falsamente por error. </li>
44
- <li>Son ineficaces, porque pueden no disuadir a algunos tramposos que pueden crear nuevas cuentas o utilizar otros métodos para evitar la prohibición. </li>
45
- </ul>
46
- <h3>Nulos abusados</h3>
47
- <p>Los nulos abusados son cuentas que han sido hackeadas, robadas o vendidas por sus propietarios. Han sido comprometidos por usuarios no autorizados que pueden utilizarlos con fines maliciosos. Sus bases suelen ser cambiadas o dañadas por los hackers o compradores. </p>
48
- <h4>Pros y contras de los nulos abusados</h4>
49
- <p>Los pros de los nulos abusados son:</p>
50
- <ul>
51
- <li>Proporcionan un desafío, porque pueden tener tropas o defensas más fuertes de lo esperado. </li>
52
- <li> Ofrecen una variedad, porque pueden tener diferentes diseños de base o estrategias de lo habitual. </li>
53
- <li>Crean un mercado, porque pueden generar ingresos para los vendedores o compradores de las cuentas. </li>
54
- </ul>
55
- <p>Los contras de los nulos abusados son:</p>
56
- <ul>
57
- <li>Son arriesgados, porque pueden exponer a los propietarios a robo de identidad, fraude o problemas legales. </li>
58
- <li>Son poco éticos, porque violan los términos de servicio y la política de juego limpio del juego. </li>
59
- <li>Son perjudiciales, porque pueden arruinar la experiencia de juego para los propietarios u otros jugadores. </li>
60
- </ul>
61
- <h2>Cómo usar nulls en Clash of Clans</h2>
62
- <h3>Cómo encontrar nulos en Clash of Clans</h3>
63
- <p>Encontrar nulos en Clash of Clans no es fácil, pero hay algunas maneras de hacerlo. Estos son algunos consejos:</p>
64
- <ul>
65
- <li>Puede usar un sitio web o aplicación de terceros que rastrea y enumera los nulos en Clash of Clans. Por ejemplo, Null Finder es un sitio web que le permite buscar nulos por nombre, nivel, liga o ubicación. Sin embargo, tenga cuidado al usar estas herramientas, ya que pueden no ser precisas, confiables o seguras. </li>
66
-
67
- <li>Puedes usar tu propia observación e intuición para detectar nulos en Clash of Clans. Por ejemplo, puedes revisar el perfil de un jugador y ver si no tiene nombre de clan, ni insignia de clan, ni historial de chat de clan, ni donaciones recibidas o dadas, ni estrellas de guerra ganadas o perdidas, o ninguna actividad reciente. Sin embargo, este método no es muy concluyente, ya que puede haber otras razones por las que un jugador tiene estas características. </li>
68
- </ul>
69
- <h3>Cómo unirse o crear un clan con nulos en Clash of Clans</h3>
70
- <p>Es posible unirse o crear un clan con nulos en Clash of Clans, pero no se recomienda. Aquí hay algunas razones por las que:</p>
71
- <ul>
72
- <li>Te perderás los beneficios de estar en un clan, como chat de clan, guerras de clan, juegos de clan, beneficios de clan y donaciones de clan. </li>
73
- <li>Te será más difícil encontrar o atraer a otros jugadores para que se unan a tu clan, ya que los nulos no son muy populares o atractivos. </li>
74
- <li> Usted tendrá un mayor riesgo de perder su cuenta o ser prohibido, ya que los nulos se asocian a menudo con el engaño o la piratería. </li>
75
- </ul>
76
- <p>Si todavía quieres unirte o crear un clan con nulos en Clash of Clans, aquí hay algunos pasos:</p>
77
- <ul>
78
- <li>Para unirse a un clan con nulos, necesitas encontrar uno que esté abierto o tenga un enlace de invitación. Puede utilizar los métodos mencionados anteriormente para encontrar los nulos y, a continuación, comprobar la información de su clan. Si el clan está abierto, simplemente puede solicitar unirse. Si el clan tiene un enlace de invitación, necesitas copiarlo y pegarlo en tu navegador y seguir las instrucciones. </li>
79
- <li>Para crear un clan con nulos, necesitas tener al menos una cuenta nula. Puede usar un servidor privado, una cuenta agrícola, una cuenta de prueba o una cuenta de engaño para crear un nulo. Luego, tienes que ir a la pestaña de clan y tocar el botón "Crear clan". Puedes elegir cualquier nombre, insignia, descripción y configuración para tu clan. Sin embargo, ten en cuenta que el nombre de tu clan puede ser cambiado por Supercell si es inapropiado u ofensivo. </li>
80
- </ul>
81
-
82
- <p>Administrar nulos en Clash of Clans no es fácil, pero hay algunos consejos para hacerlo. Aquí hay algunas sugerencias:</p>
83
- <ul>
84
- <li>Puedes usar un sitio web o aplicación de terceros que te ayude a monitorear y controlar tus nulos en Clash of Clans. Por ejemplo, Null Manager es un sitio web que le permite ver los perfiles, bases, tropas, recursos y actividades de sus nulos. También puede editar la configuración de sus nulos, como cambiar su nombre, insignia o idioma. Sin embargo, ten cuidado al usar estas herramientas, ya que pueden no ser seguras, legales o compatibles con el juego. </li>
85
- <li>Puedes usar las funciones del juego para administrar tus nulos en Clash of Clans. Por ejemplo, puede usar la opción "Marcador" para guardar los perfiles de sus nulos para facilitar el acceso. También puedes usar la opción "Desafío Amistoso" para probar las bases o tropas de tus nulos. Sin embargo, este método no es muy conveniente, ya que tiene que cambiar entre diferentes cuentas y dispositivos. </li>
86
- <li>Puedes usar tu propia estrategia y creatividad para gestionar tus nulos en Clash of Clans. Por ejemplo, puedes usar las bases de tus nulos como señuelos o distracciones para otros jugadores. También puede utilizar sus tropas nulas como apoyo o respaldo para su cuenta principal. Sin embargo, este método no es muy confiable, ya que puede encontrar problemas o limitaciones inesperadas. </li>
87
- </ul>
88
- <h2>Conclusión</h2>
89
- <h3>Resumen de los puntos principales</h3>
90
- <p>En conclusión, los nulos son jugadores que no pertenecen a ningún clan en Clash of Clans. Tienen sus propios pros y contras, dependiendo de cómo y por qué se usan. Hay tres tipos de nulos: inactivos, prohibidos y abusados. Puede encontrar, unirse, crear y administrar nulos en Clash of Clans usando varios métodos y herramientas. </p>
91
- <h3>Llamada a la acción para los lectores</h3>
92
-
93
- <p>Si tienes alguna pregunta o comentario sobre nulos en Clash of Clans, siéntete libre de dejarlos abajo. ¡Nos encantaría saber de ti! </p>
94
- <h2>Preguntas frecuentes</h2>
95
- <p>Aquí hay algunas preguntas frecuentes sobre los nulos en Clash of Clans:</p>
96
- <ol>
97
- <li><b>¿Cuál es la diferencia entre un nulo y un invitado? </b></li>
98
- <p>Un invitado es un jugador que no ha vinculado su cuenta a ningún correo electrónico o plataforma de redes sociales. Un invitado puede unirse o crear un clan en Clash of Clans. Un nulo es un jugador que no tiene nombre de clan, insignia o chat. Un null no puede unirse o crear un clan en Clash of Clans.</p>
99
- <li><b>¿Cómo puedo evitar convertirme en un nulo en Clash of Clans? </b></li>
100
- <p>Puedes evitar convertirte en un nulo en Clash of Clans siguiendo estos pasos:</p>
101
- <ul>
102
- <li>Vincula tu cuenta a un correo electrónico válido o a una plataforma de redes sociales, para que puedas recuperarla si la pierdes o cambias tu dispositivo. </li>
103
- <li>Únete o crea un clan que se adapte a tu estilo de juego y preferencias, para que puedas disfrutar del juego con otros jugadores. </li>
104
- <li>Siga los términos de servicio y la política de juego limpio del juego, para que no sea suspendido o prohibido por Supercell.</li>
105
- <li>Protege tu cuenta de hackers, estafadores o vendedores, para que no la pierdas o la comprometas con usuarios no autorizados. </li>
106
- </ul>
107
- <li><b>¿Los nulos son ilegales o van en contra de las reglas en Clash of Clans? </b></li>
108
- <p>Los nulos no son ilegales ni están en contra de las reglas en Clash of Clans, siempre y cuando no se utilicen para engañar o dañar a otros jugadores. Sin embargo, algunos métodos o herramientas para crear o usar nulos pueden ser ilegales o ir en contra de las reglas, como usar servidores privados, hackear cuentas o vender cuentas. Supercell puede tomar medidas contra estos métodos o herramientas, y puede suspender o prohibir las cuentas involucradas. </p>
109
- <li><b>¿Puedo reportar un null en Clash of Clans? </b></li>
110
-
111
- <li><b>¿Puedo jugar con nulos en Clash of Clans? </b></li>
112
- <p>Puedes jugar con nulos en Clash of Clans si quieres, pero debes ser consciente de los riesgos y consecuencias. Es posible que no tengas la misma experiencia de juego que jugar con jugadores normales, y puedes encontrar algunos problemas o limitaciones. También puede enfrentar alguna reacción o crítica de otros jugadores a los que no les gusten o no aprueben los nulos. En última instancia, es su elección si desea jugar con nulos o no, pero le recomendamos que juegue con precaución y respeto. </p>
113
- </ol></p> 64aa2da5cf<br />
114
- <br />
115
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Apk Avance Servidor.md DELETED
@@ -1,100 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar gratis fuego APK Advance Server</h1>
3
- <p>Free Fire es uno de los juegos battle royale más populares en dispositivos móviles, con millones de jugadores en todo el mundo. Si eres un fan de Free Fire, podrías estar interesado en probar las últimas características y actualizaciones antes de que se publiquen oficialmente. Esto es posible mediante la descarga de Free Fire APK Advance Server, una versión especial del juego que le permite probar nuevos contenidos y proporcionar comentarios a Garena.</p>
4
- <p>En este artículo, le mostraremos cómo descargar Free Fire APK Advance Server, cuáles son sus características, cómo informar de errores y comentarios, y cómo desinstalarlo de su dispositivo. Siguiendo esta guía, podrás disfrutar de Free Fire como nunca antes. </p>
5
- <h2>descargar gratis fuego apk avance servidor</h2><br /><p><b><b>DOWNLOAD</b> &#11088; <a href="https://bltlly.com/2v6Lea">https://bltlly.com/2v6Lea</a></b></p><br /><br />
6
- <h2>¿Qué es Free Fire APK Advance Server? </h2>
7
- <p>Free Fire APK Advance Server es un programa que permite a los jugadores seleccionados acceder a una versión beta de Free Fire que contiene características y actualizaciones inéditas. El propósito de este programa es permitir que los jugadores experimenten nuevos contenidos con anticipación y ayudar a Garena a mejorar el juego informando de cualquier problema o fallo que se encuentren. </p>
8
- <p>Free Fire APK Advance Server no está disponible para todos. Solo los jugadores que se hayan registrado para el programa y hayan recibido un código de activación pueden entrar en la fase de prueba. Además, hay un número limitado de ranuras disponibles para cada ciclo de actualización, por lo que debe darse prisa si desea unirse. </p>
9
- <p>Los beneficios de unirse a Free Fire APK Advance Server son:</p>
10
- <ul>
11
- <li>Puedes jugar nuevos personajes, mascotas, armas, objetos, modos y mapas antes que nadie. </li>
12
- <li> Usted puede proporcionar información valiosa y sugerencias a Garena para hacer Free Fire mejor. </li>
13
- <li>Puedes ganar recompensas por reportar errores y problemas. </li>
14
- </ul>
15
- <h2>¿Cómo Registrarse para Free Fire APK Advance Server? </h2>
16
- <p>Si desea unirse a Free Fire APK Advance Server, usted tiene que registrarse en el sitio web oficial de Garena. Estos son los pasos que debes seguir:</p>
17
- <ol>
18
-
19
- <li>Rellene su información personal, como su nombre, correo electrónico y número de teléfono. </li>
20
- <li>Envía tu solicitud y espera el correo de confirmación. </li>
21
- </ol>
22
- <p>Si estás seleccionado para el programa, recibirás un correo electrónico con un código de activación que puedes usar para entrar en Free Fire APK Advance Server. El código es único y solo puede ser utilizado por una persona. Tiene que usar el código dentro de un cierto período de tiempo, de lo contrario caducará. </p>
23
- <h3>¿Cómo obtener el código de activación? </h3>
24
- <p>Obtener el código de activación no es fácil, ya que hay muchos jugadores que quieren unirse a Free Fire APK Advance Server. Sin embargo, hay algunos consejos y trucos que pueden aumentar tus posibilidades de obtener el código:</p>
25
- <p></p>
26
- <ul>
27
- <li>Regístrese lo antes posible, ya que las ranuras son limitadas y se llenan rápidamente. </li>
28
- <li>Revise su correo electrónico regularmente, ya que el código puede ser enviado en cualquier momento. </li>
29
- <li>Sigue las cuentas de redes sociales de Garena, como Facebook, Instagram y YouTube, ya que podrían anunciar sorteos o concursos para el código. </li>
30
- <li>Invite a sus amigos a registrarse para el programa, ya que podrían compartir su código con usted si lo consiguen. </li>
31
- </ul>
32
- <h3>¿Cómo descargar e instalar el archivo APK? </h3>
33
- <p>Una vez que tenga el código de activación, puede descargar e instalar Free Fire APK Advance Server en su dispositivo. Estos son los pasos que debes seguir:</p>
34
- <ol>
35
- <li>Visite <a href=">https://ff-advance.ff.garena.com/download</a> y haga clic en el botón de descarga. </li>
36
- <li>Permita que su dispositivo instale aplicaciones de fuentes desconocidas. Puede hacer esto yendo a Configuración > Seguridad > Fuentes desconocidas y habilitándolo. </li>
37
- <li>Busque el archivo APK descargado en su administrador de archivos y toque en él para instalarlo. </li>
38
- <li>Abra la aplicación e introduzca su código de activación cuando se le solicite. </li>
39
- <li>Disfruta jugando gratis fuego APK Advance Server.</li>
40
- </ol>
41
- <h2>¿Cuáles son las características de Free Fire APK Advance Server? </h2>
42
-
43
- <h3>Nuevos personajes y mascotas</h3>
44
- <p>Puedes jugar con nuevos personajes y mascotas que tienen habilidades y habilidades únicas. Por ejemplo, puedes probar:</p>
45
- <ul>
46
- <li><b>Kelly "The Swift"</b>: Un personaje que puede correr más rápido que cualquier otro. Ella tiene una habilidad pasiva llamada Dash que aumenta su velocidad de sprint en un 1% para cada nivel. También tiene una habilidad activa llamada Velocidad Mortal que aumenta su daño en un 110% durante 5 segundos después de correr durante 7 segundos. </li>
47
- <li><b>Moco "Ojo de hacker"</b>: Un personaje que puede hackear las ubicaciones y movimientos de los enemigos. Ella tiene una habilidad pasiva llamada Ojo de Hacker que etiqueta a los enemigos disparados por ella o sus compañeros de equipo durante 5 segundos. Las ubicaciones de los enemigos etiquetados se comparten con sus compañeros de equipo. </li>
48
- <li><b>Nutty "La ardilla"</b>: Una mascota que puede ayudarte a encontrar el botín más rápido. Tiene una habilidad llamada Regalo de Nuez que aumenta la probabilidad de encontrar cajas de botín en un 10%. </li>
49
- </ul>
50
- <h3>Nuevas armas y objetos</h3>
51
- <p>Puedes usar nuevas armas y objetos que tengan diferentes efectos y ventajas. Por ejemplo, puedes probar:</p>
52
- <ul>
53
- <li><b>Gloo Wall Grenade</b>: Un elemento desechable que crea una pared temporal que bloquea el fuego enemigo. Puedes usarlo para crear cobertura o atrapar enemigos. </li>
54
- <li><b>M82B Rifle de francotirador</b>: Un arma poderosa que puede penetrar paredes gloo e infligir daño masivo. Tiene un alcance que puede acercar hasta 8x y una capacidad de cargador de 8 balas. </li>
55
- <li><b>Hoguera</b>: Un elemento consumible que crea un fuego que te cura a ti y a tus compañeros de equipo. Puede colocarlo en el suelo y sentarse alrededor de él para restaurar 10 HP por segundo durante 10 segundos. </li>
56
- </ul>
57
- <h3>Nuevos modos y mapas</h3>
58
- <p>Puedes jugar nuevos modos y mapas que tienen diferentes retos y objetivos. Por ejemplo, puedes probar:</p>
59
- <ul>
60
-
61
- <li><b>Mapa de Kalahari</b>: Un nuevo mapa que se encuentra en un desierto con varios terrenos y estructuras. Puede explorar cuevas, cañones, oasis y ruinas. También puede encontrar vehículos y tirolinas para moverse más rápido. </li>
62
- <li><b>Campo de entrenamiento</b>: Un mapa donde puedes practicar tus habilidades y probar diferentes armas y objetos. También puedes interactuar con otros jugadores y unirte a minijuegos. </li>
63
- </ul>
64
- <h2>¿Cómo reportar errores y comentarios? </h2>
65
- <p>Como probador de Free Fire APK Advance Server, usted tiene la responsabilidad de informar de cualquier error o problemas que se encuentran durante el juego. Esto ayudará a Garena a arreglarlos y mejorar la calidad del juego. También puedes proporcionar comentarios y sugerencias sobre cómo mejorar el juego. </p>
66
- <p>Para reportar errores y comentarios, tienes que usar la función de informe dentro del juego. Estos son los pasos que debes seguir:</p>
67
- <ol>
68
- <li>Abre el menú del juego y toca el icono del informe. </li>
69
- <li>Seleccione el tipo de problema que desea reportar, como juego, gráficos, sonido u otros. </li>
70
- <li>Describa el problema en detalle y adjunte una captura de pantalla o un video si es posible. </li>
71
- <li>Envía tu informe y espera la respuesta de Garena. </li>
72
- </ol>
73
- <p>Por cada informe válido que envíe, recibirá una recompensa de 100 diamantes. Puede usar estos diamantes para comprar artículos en la tienda de juegos. Sin embargo, tienes que ser honesto y preciso en tus informes, ya que Garena los verificará y prohibirá cualquier informe falso o abusivo. </p>
74
- <h2> ¿Cómo desinstalar Free Fire APK Advance Server? </h2>
75
- <p>Si desea desinstalar Free Fire APK Advance Server desde su dispositivo, usted tiene que seguir estos pasos:</p>
76
- <ol>
77
- <li>Ir a Configuración > Aplicaciones > Servidor Gratis Fire Advance y toque en Desinstalar.</li>
78
- <li>Confirma tu acción y espera a que la aplicación se elimine de tu dispositivo. </li>
79
- <li>Eliminar el archivo APK de su gestor de archivos si todavía lo tiene. </li>
80
- </ol>
81
-
82
- <h2>Conclusión</h2>
83
- <p>Free Fire APK Advance Server es una gran manera de experimentar nuevos contenidos y actualizaciones antes de que se lancen oficialmente. Al unirse a este programa, puedes jugar con nuevos personajes, mascotas, armas, objetos, modos y mapas. También puede proporcionar comentarios y sugerencias a Garena y ganar recompensas por reportar errores y problemas. </p>
84
- <p>Si desea descargar Free Fire APK Advance Server, usted tiene que registrarse en el sitio web de Garena y obtener un código de activación. Luego, puede descargar e instalar el archivo APK en su dispositivo e ingresar a la fase de prueba. Sin embargo, tienes que ser rápido ya que hay espacios limitados disponibles para cada ciclo de actualización. </p>
85
- <p>Esperamos que este artículo le ha ayudado a entender cómo descargar Free Fire APK Advance Server y cuáles son sus características. Si tiene alguna pregunta o comentario, siéntase libre de dejarlos abajo. ¡Feliz juego! </p>
86
- <h3>Preguntas frecuentes</h3>
87
- <ul>
88
- <li><b>Q: ¿Es seguro descargar Free Fire APK Advance Server? </b></li>
89
- <li>A: Sí, Free Fire APK Advance Server es seguro de descargar, siempre y cuando se obtiene desde el sitio web oficial de Garena. No lo descargue de ninguna otra fuente ya que podría contener virus o malware. </li>
90
- <li><b>Q: ¿Puedo jugar Free Fire APK Advance Server con mis amigos? </b></li>
91
- <li>A: Sí, puedes jugar Free Fire APK Advance Server con tus amigos si también tienen el código de activación y la aplicación instalada en sus dispositivos. Puedes invitarlos a unirse a tu equipo o jugar contra ellos en diferentes modos. </li>
92
- <li><b>Q: ¿Se guardará mi progreso en Free Fire APK Advance Server en mi cuenta original de Free Fire? </b></li>
93
- <li>A: No, su progreso en Free Fire APK Advance Server no se guardará en su cuenta Free Fire original. Son aplicaciones separadas con datos separados. Usted comenzará desde cero en Free Fire APK Advance Server y perder todo cuando se desinstala. </li>
94
- <li><b>Q: ¿Con qué frecuencia se actualiza Free Fire APK Advance Server? </b></li>
95
-
96
- <li><b>Q: ¿Cómo puedo contactar a Garena si tengo algún problema o sugerencia con respecto a Free Fire APK Advance Server? </b></li>
97
- <li>A: Puede A: Puede ponerse en contacto con Garena enviando un correo electrónico a [email protected] o rellenando el formulario de comentarios en su sitio web. También puedes llegar a ellos a través de sus cuentas de redes sociales, como Facebook, Instagram y YouTube.</li>
98
- </ul></p> 64aa2da5cf<br />
99
- <br />
100
- <br />
 
spaces/BigSalmon/InformalToFormal/app.py DELETED
@@ -1,58 +0,0 @@
1
- import streamlit as st
2
- import numpy as np
3
- import pandas as pd
4
- import os
5
- import torch
6
- import torch.nn as nn
7
- from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForCausalLM
8
- from transformers.activations import get_activation
9
-
10
-
11
- st.title('Informal to Formal:')
12
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
-
14
- st.text('''Check out this other space: https://huggingface.co/spaces/BigSalmon/GPT2Space''')
15
-
16
- st.text('''How To Make Prompt: https://huggingface.co/BigSalmon/DefinitionsSynonyms3
17
-
18
- part of speech- verb
19
- definition: grow less in intensity or degree
20
- ex. rather than leave immediately and be drenched, they waited for the storm to ________
21
- synonyms: subside; moderate; decrease
22
- antonyms: increase
23
- word: abate''')
24
-
25
- @st.cache(allow_output_mutation=True)
26
- def get_model():
27
- #tokenizer = AutoTokenizer.from_pretrained("gpt2")
28
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln12")
29
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/Points")
30
- #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln91Paraphrase")
31
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln91Paraphrase")
32
- #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln95Paraphrase")
33
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln95Paraphrase")
34
- tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms3")
35
- model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms3")
36
- return model, tokenizer
37
-
38
- model, tokenizer = get_model()
39
-
40
- with st.form(key='my_form'):
41
- prompt = st.text_area(label='Enter sentence')
42
- submit_button = st.form_submit_button(label='Submit')
43
-
44
- if submit_button:
45
- with torch.no_grad():
46
- text = tokenizer.encode(prompt)
47
- myinput, past_key_values = torch.tensor([text]), None
48
- myinput = myinput
49
- myinput= myinput.to(device)
50
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
51
- logits = logits[0,-1]
52
- probabilities = torch.nn.functional.softmax(logits, dim=-1)
53
- best_logits, best_indices = logits.topk(100)
54
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
55
- text.append(best_indices[0].item())
56
- best_probabilities = probabilities[best_indices].tolist()
57
- words = []
58
- st.write(best_words)
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/register_coco.py DELETED
@@ -1,125 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import copy
3
-
4
- from detectron2.data import DatasetCatalog, MetadataCatalog
5
-
6
- from .coco import load_coco_json, load_sem_seg
7
-
8
- """
9
- This file contains functions to register a COCO-format dataset to the DatasetCatalog.
10
- """
11
-
12
- __all__ = ["register_coco_instances", "register_coco_panoptic_separated"]
13
-
14
-
15
- def register_coco_instances(name, metadata, json_file, image_root):
16
- """
17
- Register a dataset in COCO's json annotation format for
18
- instance detection, instance segmentation and keypoint detection.
19
- (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
20
- `instances*.json` and `person_keypoints*.json` in the dataset).
21
-
22
- This is an example of how to register a new dataset.
23
- You can do something similar to this function, to register new datasets.
24
-
25
- Args:
26
- name (str): the name that identifies a dataset, e.g. "coco_2014_train".
27
- metadata (dict): extra metadata associated with this dataset. You can
28
- leave it as an empty dict.
29
- json_file (str): path to the json instance annotation file.
30
- image_root (str or path-like): directory which contains all the images.
31
- """
32
- # 1. register a function which returns dicts
33
- DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
34
-
35
- # 2. Optionally, add metadata about this dataset,
36
- # since they might be useful in evaluation, visualization or logging
37
- MetadataCatalog.get(name).set(
38
- json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
39
- )
40
-
41
-
42
- def register_coco_panoptic_separated(
43
- name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
44
- ):
45
- """
46
- Register a COCO panoptic segmentation dataset named `name`.
47
- The annotations in this registered dataset will contain both instance annotations and
48
- semantic annotations, each with its own contiguous ids. Hence it's called "separated".
49
-
50
- It follows the setting used by the PanopticFPN paper:
51
-
52
- 1. The instance annotations directly come from polygons in the COCO
53
- instances annotation task, rather than from the masks in the COCO panoptic annotations.
54
-
55
- The two formats have small differences:
56
- Polygons in the instance annotations may have overlaps.
57
- The mask annotations are produced by labeling the overlapped polygons
58
- with depth ordering.
59
-
60
- 2. The semantic annotations are converted from panoptic annotations, where
61
- all "things" are assigned a semantic id of 0.
62
- All semantic categories will therefore have ids in contiguous
63
- range [1, #stuff_categories].
64
-
65
- This function will also register a pure semantic segmentation dataset
66
- named ``name + '_stuffonly'``.
67
-
68
- Args:
69
- name (str): the name that identifies a dataset,
70
- e.g. "coco_2017_train_panoptic"
71
- metadata (dict): extra metadata associated with this dataset.
72
- image_root (str): directory which contains all the images
73
- panoptic_root (str): directory which contains panoptic annotation images
74
- panoptic_json (str): path to the json panoptic annotation file
75
- sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
76
- instances_json (str): path to the json instance annotation file
77
- """
78
- panoptic_name = name + "_separated"
79
- DatasetCatalog.register(
80
- panoptic_name,
81
- lambda: merge_to_panoptic(
82
- load_coco_json(instances_json, image_root, panoptic_name),
83
- load_sem_seg(sem_seg_root, image_root),
84
- ),
85
- )
86
- MetadataCatalog.get(panoptic_name).set(
87
- panoptic_root=panoptic_root,
88
- image_root=image_root,
89
- panoptic_json=panoptic_json,
90
- sem_seg_root=sem_seg_root,
91
- json_file=instances_json, # TODO rename
92
- evaluator_type="coco_panoptic_seg",
93
- **metadata
94
- )
95
-
96
- semantic_name = name + "_stuffonly"
97
- DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
98
- MetadataCatalog.get(semantic_name).set(
99
- sem_seg_root=sem_seg_root, image_root=image_root, evaluator_type="sem_seg", **metadata
100
- )
101
-
102
-
103
- def merge_to_panoptic(detection_dicts, sem_seg_dicts):
104
- """
105
- Create dataset dicts for panoptic segmentation, by
106
- merging two dicts using "file_name" field to match their entries.
107
-
108
- Args:
109
- detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
110
- sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
111
-
112
- Returns:
113
- list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
114
- both detection_dicts and sem_seg_dicts that correspond to the same image.
115
- The function assumes that the same key in different dicts has the same value.
116
- """
117
- results = []
118
- sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
119
- assert len(sem_seg_file_to_entry) > 0
120
-
121
- for det_dict in detection_dicts:
122
- dic = copy.copy(det_dict)
123
- dic.update(sem_seg_file_to_entry[dic["file_name"]])
124
- results.append(dic)
125
- return results
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/transforms/transform.py DELETED
@@ -1,139 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
- # File: transform.py
4
-
5
- import numpy as np
6
- from fvcore.transforms.transform import HFlipTransform, NoOpTransform, Transform
7
- from PIL import Image
8
-
9
- __all__ = ["ExtentTransform", "ResizeTransform"]
10
-
11
-
12
- class ExtentTransform(Transform):
13
- """
14
- Extracts a subregion from the source image and scales it to the output size.
15
-
16
- The fill color is used to map pixels from the source rect that fall outside
17
- the source image.
18
-
19
- See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
20
- """
21
-
22
- def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
23
- """
24
- Args:
25
- src_rect (x0, y0, x1, y1): src coordinates
26
- output_size (h, w): dst image size
27
- interp: PIL interpolation methods
28
- fill: Fill color used when src_rect extends outside image
29
- """
30
- super().__init__()
31
- self._set_attributes(locals())
32
-
33
- def apply_image(self, img, interp=None):
34
- h, w = self.output_size
35
- ret = Image.fromarray(img).transform(
36
- size=(w, h),
37
- method=Image.EXTENT,
38
- data=self.src_rect,
39
- resample=interp if interp else self.interp,
40
- fill=self.fill,
41
- )
42
- return np.asarray(ret)
43
-
44
- def apply_coords(self, coords):
45
- # Transform image center from source coordinates into output coordinates
46
- # and then map the new origin to the corner of the output image.
47
- h, w = self.output_size
48
- x0, y0, x1, y1 = self.src_rect
49
- new_coords = coords.astype(np.float32)
50
- new_coords[:, 0] -= 0.5 * (x0 + x1)
51
- new_coords[:, 1] -= 0.5 * (y0 + y1)
52
- new_coords[:, 0] *= w / (x1 - x0)
53
- new_coords[:, 1] *= h / (y1 - y0)
54
- new_coords[:, 0] += 0.5 * w
55
- new_coords[:, 1] += 0.5 * h
56
- return new_coords
57
-
58
- def apply_segmentation(self, segmentation):
59
- segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
60
- return segmentation
61
-
62
-
63
- class ResizeTransform(Transform):
64
- """
65
- Resize the image to a target size.
66
- """
67
-
68
- def __init__(self, h, w, new_h, new_w, interp):
69
- """
70
- Args:
71
- h, w (int): original image size
72
- new_h, new_w (int): new image size
73
- interp: PIL interpolation methods
74
- """
75
- # TODO decide on PIL vs opencv
76
- super().__init__()
77
- self._set_attributes(locals())
78
-
79
- def apply_image(self, img, interp=None):
80
- assert img.shape[:2] == (self.h, self.w)
81
- pil_image = Image.fromarray(img)
82
- interp_method = interp if interp is not None else self.interp
83
- pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
84
- ret = np.asarray(pil_image)
85
- return ret
86
-
87
- def apply_coords(self, coords):
88
- coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
89
- coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
90
- return coords
91
-
92
- def apply_segmentation(self, segmentation):
93
- segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
94
- return segmentation
95
-
96
-
97
- def HFlip_rotated_box(transform, rotated_boxes):
98
- """
99
- Apply the horizontal flip transform on rotated boxes.
100
-
101
- Args:
102
- rotated_boxes (ndarray): Nx5 floating point array of
103
- (x_center, y_center, width, height, angle_degrees) format
104
- in absolute coordinates.
105
- """
106
- # Transform x_center
107
- rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
108
- # Transform angle
109
- rotated_boxes[:, 4] = -rotated_boxes[:, 4]
110
- return rotated_boxes
111
-
112
-
113
- def Resize_rotated_box(transform, rotated_boxes):
114
- """
115
- Apply the resizing transform on rotated boxes. For details of how these (approximation)
116
- formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
117
-
118
- Args:
119
- rotated_boxes (ndarray): Nx5 floating point array of
120
- (x_center, y_center, width, height, angle_degrees) format
121
- in absolute coordinates.
122
- """
123
- scale_factor_x = transform.new_w * 1.0 / transform.w
124
- scale_factor_y = transform.new_h * 1.0 / transform.h
125
- rotated_boxes[:, 0] *= scale_factor_x
126
- rotated_boxes[:, 1] *= scale_factor_y
127
- theta = rotated_boxes[:, 4] * np.pi / 180.0
128
- c = np.cos(theta)
129
- s = np.sin(theta)
130
- rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
131
- rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
132
- rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
133
-
134
- return rotated_boxes
135
-
136
-
137
- HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
138
- NoOpTransform.register_type("rotated_box", lambda t, x: x)
139
- ResizeTransform.register_type("rotated_box", Resize_rotated_box)
 
spaces/CVPR/DualStyleGAN/app.py DELETED
@@ -1,204 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- from __future__ import annotations
4
-
5
- import pathlib
6
-
7
- import gradio as gr
8
-
9
- from dualstylegan import Model
10
-
11
- DESCRIPTION = '''# Portrait Style Transfer with [DualStyleGAN](https://github.com/williamyang1991/DualStyleGAN)
12
-
13
- <img id="overview" alt="overview" src="https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images/overview.jpg" />
14
- '''
15
-
16
-
17
- def get_style_image_url(style_name: str) -> str:
18
- base_url = 'https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images'
19
- filenames = {
20
- 'cartoon': 'cartoon_overview.jpg',
21
- 'caricature': 'caricature_overview.jpg',
22
- 'anime': 'anime_overview.jpg',
23
- 'arcane': 'Reconstruction_arcane_overview.jpg',
24
- 'comic': 'Reconstruction_comic_overview.jpg',
25
- 'pixar': 'Reconstruction_pixar_overview.jpg',
26
- 'slamdunk': 'Reconstruction_slamdunk_overview.jpg',
27
- }
28
- return f'{base_url}/{filenames[style_name]}'
29
-
30
-
31
- def get_style_image_markdown_text(style_name: str) -> str:
32
- url = get_style_image_url(style_name)
33
- return f'<img id="style-image" src="{url}" alt="style image">'
34
-
35
-
36
- def update_slider(choice: str) -> dict:
37
- max_vals = {
38
- 'cartoon': 316,
39
- 'caricature': 198,
40
- 'anime': 173,
41
- 'arcane': 99,
42
- 'comic': 100,
43
- 'pixar': 121,
44
- 'slamdunk': 119,
45
- }
46
- return gr.update(maximum=max_vals[choice])
47
-
48
-
49
- def update_style_image(style_name: str) -> dict:
50
- text = get_style_image_markdown_text(style_name)
51
- return gr.update(value=text)
52
-
53
-
54
- model = Model()
55
-
56
- with gr.Blocks(css='style.css') as demo:
57
- gr.Markdown(DESCRIPTION)
58
-
59
- with gr.Box():
60
- gr.Markdown('''## Step 1 (Preprocess Input Image)
61
-
62
- - Drop an image containing a near-frontal face to the **Input Image**.
63
- - If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand.
64
- - Hit the **Preprocess** button.
65
- - Choose the encoder version. Default is Z+ encoder which has better stylization performance. W+ encoder better reconstructs the input image to preserve more details.
66
- - The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image.
67
- ''')
68
- with gr.Row():
69
- encoder_type = gr.Radio(label='Encoder Type',
70
- choices=[
71
- 'Z+ encoder (better stylization)',
72
- 'W+ encoder (better reconstruction)'
73
- ],
74
- value='Z+ encoder (better stylization)')
75
- with gr.Row():
76
- with gr.Column():
77
- with gr.Row():
78
- input_image = gr.Image(label='Input Image',
79
- type='filepath')
80
- with gr.Row():
81
- preprocess_button = gr.Button('Preprocess')
82
- with gr.Column():
83
- with gr.Row():
84
- aligned_face = gr.Image(label='Aligned Face',
85
- type='numpy',
86
- interactive=False)
87
- with gr.Column():
88
- reconstructed_face = gr.Image(label='Reconstructed Face',
89
- type='numpy')
90
- instyle = gr.State()
91
-
92
- with gr.Row():
93
- paths = sorted(pathlib.Path('images').glob('*.jpg'))
94
- gr.Examples(examples=[[path.as_posix()] for path in paths],
95
- inputs=input_image)
96
-
97
- with gr.Box():
98
- gr.Markdown('''## Step 2 (Select Style Image)
99
-
100
- - Select **Style Type**.
101
- - Select **Style Image Index** from the image table below.
102
- ''')
103
- with gr.Row():
104
- with gr.Column():
105
- style_type = gr.Radio(label='Style Type',
106
- choices=model.style_types,
107
- value=model.style_types[0])
108
- text = get_style_image_markdown_text('cartoon')
109
- style_image = gr.Markdown(value=text)
110
- style_index = gr.Slider(label='Style Image Index',
111
- minimum=0,
112
- maximum=316,
113
- step=1,
114
- value=26)
115
-
116
- with gr.Row():
117
- gr.Examples(
118
- examples=[
119
- ['cartoon', 26],
120
- ['caricature', 65],
121
- ['arcane', 63],
122
- ['pixar', 80],
123
- ],
124
- inputs=[style_type, style_index],
125
- )
126
-
127
- with gr.Box():
128
- gr.Markdown('''## Step 3 (Generate Style Transferred Image)
129
-
130
- - Adjust **Structure Weight** and **Color Weight**.
131
- - These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image.
132
- - Tips: For W+ encoder, better way of (Structure Only) is to uncheck (Structure Only) and set Color weight to 0.
133
- - Hit the **Generate** button.
134
- ''')
135
- with gr.Row():
136
- with gr.Column():
137
- with gr.Row():
138
- structure_weight = gr.Slider(label='Structure Weight',
139
- minimum=0,
140
- maximum=1,
141
- step=0.1,
142
- value=0.6)
143
- with gr.Row():
144
- color_weight = gr.Slider(label='Color Weight',
145
- minimum=0,
146
- maximum=1,
147
- step=0.1,
148
- value=1)
149
- with gr.Row():
150
- structure_only = gr.Checkbox(label='Structure Only',
151
- value=False)
152
- with gr.Row():
153
- generate_button = gr.Button('Generate')
154
-
155
- with gr.Column():
156
- result = gr.Image(label='Result')
157
-
158
- with gr.Row():
159
- gr.Examples(
160
- examples=[
161
- [0.6, 1.0],
162
- [0.3, 1.0],
163
- [0.0, 1.0],
164
- [1.0, 0.0],
165
- ],
166
- inputs=[structure_weight, color_weight],
167
- )
168
-
169
- preprocess_button.click(
170
- fn=model.detect_and_align_face,
171
- inputs=[input_image],
172
- outputs=aligned_face,
173
- )
174
- aligned_face.change(
175
- fn=model.reconstruct_face,
176
- inputs=[aligned_face, encoder_type],
177
- outputs=[
178
- reconstructed_face,
179
- instyle,
180
- ],
181
- )
182
- style_type.change(
183
- fn=update_slider,
184
- inputs=style_type,
185
- outputs=style_index,
186
- )
187
- style_type.change(
188
- fn=update_style_image,
189
- inputs=style_type,
190
- outputs=style_image,
191
- )
192
- generate_button.click(
193
- fn=model.generate,
194
- inputs=[
195
- style_type,
196
- style_index,
197
- structure_weight,
198
- color_weight,
199
- structure_only,
200
- instyle,
201
- ],
202
- outputs=result,
203
- )
204
- demo.queue(max_size=20).launch()
 
spaces/CVPR/LIVE/thrust/cmake/PrintNinjaBuildTimes.cmake DELETED
@@ -1,101 +0,0 @@
1
- ## This CMake script parses a .ninja_log file (LOGFILE) and prints a list of
2
- ## build/link times, sorted longest first.
3
- ##
4
- ## cmake -DLOGFILE=<.ninja_log file> \
5
- ## -P PrintNinjaBuildTimes.cmake
6
- ##
7
- ## If LOGFILE is omitted, the current directory's .ninja_log file is used.
8
- ################################################################################
9
-
10
- cmake_minimum_required(VERSION 3.15)
11
-
12
- # Prepend the string with "0" until the string length equals the specified width
13
- function(pad_string_with_zeros string_var width)
14
- set(local_string "${${string_var}}")
15
- string(LENGTH "${local_string}" size)
16
- while(size LESS width)
17
- string(PREPEND local_string "0")
18
- string(LENGTH "${local_string}" size)
19
- endwhile()
20
- set(${string_var} "${local_string}" PARENT_SCOPE)
21
- endfunction()
22
-
23
- ################################################################################
24
-
25
- if (NOT LOGFILE)
26
- set(LOGFILE ".ninja_log")
27
- endif()
28
-
29
- # Check if logfile exists
30
- if (NOT EXISTS "${LOGFILE}")
31
- message(FATAL_ERROR "LOGFILE does not exist ('${LOGFILE}').")
32
- endif()
33
-
34
- # Read the logfile and generate a map / keylist
35
- set(keys)
36
- file(STRINGS "${LOGFILE}" lines)
37
- foreach(line ${lines})
38
-
39
- # Parse each build time
40
- string(REGEX MATCH
41
- "^([0-9]+)\t([0-9]+)\t[0-9]+\t([^\t]+)+\t[0-9a-fA-F]+$" _DUMMY "${line}")
42
-
43
- if (CMAKE_MATCH_COUNT EQUAL 3)
44
- set(start_ms ${CMAKE_MATCH_1})
45
- set(end_ms ${CMAKE_MATCH_2})
46
- set(command "${CMAKE_MATCH_3}")
47
- math(EXPR runtime_ms "${end_ms} - ${start_ms}")
48
-
49
- # Compute human readable time
50
- math(EXPR days "${runtime_ms} / (1000 * 60 * 60 * 24)")
51
- math(EXPR runtime_ms "${runtime_ms} - (${days} * 1000 * 60 * 60 * 24)")
52
- math(EXPR hours "${runtime_ms} / (1000 * 60 * 60)")
53
- math(EXPR runtime_ms "${runtime_ms} - (${hours} * 1000 * 60 * 60)")
54
- math(EXPR minutes "${runtime_ms} / (1000 * 60)")
55
- math(EXPR runtime_ms "${runtime_ms} - (${minutes} * 1000 * 60)")
56
- math(EXPR seconds "${runtime_ms} / 1000")
57
- math(EXPR milliseconds "${runtime_ms} - (${seconds} * 1000)")
58
-
59
- # Format time components
60
- pad_string_with_zeros(days 3)
61
- pad_string_with_zeros(hours 2)
62
- pad_string_with_zeros(minutes 2)
63
- pad_string_with_zeros(seconds 2)
64
- pad_string_with_zeros(milliseconds 3)
65
-
66
- # Construct table entry
67
- # Later values in the file for the same command overwrite earlier entries
68
- string(MAKE_C_IDENTIFIER "${command}" key)
69
- set(ENTRY_${key}
70
- "${days}d ${hours}h ${minutes}m ${seconds}s ${milliseconds}ms | ${command}"
71
- )
72
-
73
- # Record the key:
74
- list(APPEND keys "${key}")
75
- endif()
76
- endforeach()
77
-
78
- list(REMOVE_DUPLICATES keys)
79
-
80
- # Build the entry list:
81
- set(entries)
82
- foreach(key ${keys})
83
- list(APPEND entries "${ENTRY_${key}}")
84
- endforeach()
85
-
86
- if (NOT entries)
87
- message(FATAL_ERROR "LOGFILE contained no build entries ('${LOGFILE}').")
88
- endif()
89
-
90
- # Sort in descending order:
91
- list(SORT entries)
92
- list(REVERSE entries)
93
-
94
- # Dump table:
95
- message(STATUS "-----------------------+----------------------------")
96
- message(STATUS "Time | Command ")
97
- message(STATUS "-----------------------+----------------------------")
98
-
99
- foreach(entry ${entries})
100
- message(STATUS ${entry})
101
- endforeach()
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/get_value.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits get_value
22
- #include <thrust/system/detail/sequential/get_value.h>
23
-
 
spaces/CikeyQI/meme-api/meme_generator/memes/marriage/__init__.py DELETED
@@ -1,27 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
-
8
- img_dir = Path(__file__).parent / "images"
9
-
10
-
11
- def marriage(images: List[BuildImage], texts, args):
12
- img = images[0].convert("RGBA").resize_height(1080)
13
- img_w, img_h = img.size
14
- if img_w > 1500:
15
- img_w = 1500
16
- elif img_w < 800:
17
- img_h = int(img_h * img_w / 800)
18
- frame = img.resize_canvas((img_w, img_h)).resize_height(1080)
19
- left = BuildImage.open(img_dir / "0.png")
20
- right = BuildImage.open(img_dir / "1.png")
21
- frame.paste(left, alpha=True).paste(
22
- right, (frame.width - right.width, 0), alpha=True
23
- )
24
- return frame.save_jpg()
25
-
26
-
27
- add_meme("marriage", marriage, min_images=1, max_images=1, keywords=["结婚申请", "结婚登记"])
 
spaces/CodingBillionaire/bark-voice-cloning/hubert/pre_kmeans_hubert.py DELETED
@@ -1,85 +0,0 @@
1
- from pathlib import Path
2
-
3
- import torch
4
- from torch import nn
5
- from einops import pack, unpack
6
-
7
- import fairseq
8
-
9
- from torchaudio.functional import resample
10
-
11
- import logging
12
- logging.root.setLevel(logging.ERROR)
13
-
14
-
15
- def exists(val):
16
- return val is not None
17
-
18
-
19
- def default(val, d):
20
- return val if exists(val) else d
21
-
22
-
23
- class CustomHubert(nn.Module):
24
- """
25
- checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
26
- or you can train your own
27
- """
28
-
29
- def __init__(
30
- self,
31
- checkpoint_path,
32
- target_sample_hz=16000,
33
- seq_len_multiple_of=None,
34
- output_layer=9
35
- ):
36
- super().__init__()
37
- self.target_sample_hz = target_sample_hz
38
- self.seq_len_multiple_of = seq_len_multiple_of
39
- self.output_layer = output_layer
40
-
41
- model_path = Path(checkpoint_path)
42
-
43
- assert model_path.exists(), f'path {checkpoint_path} does not exist'
44
-
45
- checkpoint = torch.load(checkpoint_path)
46
- load_model_input = {checkpoint_path: checkpoint}
47
- model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
48
-
49
- self.model = model[0]
50
- self.model.eval()
51
-
52
- @property
53
- def groups(self):
54
- return 1
55
-
56
- @torch.no_grad()
57
- def forward(
58
- self,
59
- wav_input,
60
- flatten=True,
61
- input_sample_hz=None
62
- ):
63
- device = wav_input.device
64
-
65
- if exists(input_sample_hz):
66
- wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
67
-
68
- embed = self.model(
69
- wav_input,
70
- features_only=True,
71
- mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
72
- output_layer=self.output_layer
73
- )
74
-
75
- embed, packed_shape = pack([embed['x']], '* d')
76
-
77
- # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
78
-
79
- codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
80
-
81
- if flatten:
82
- return codebook_indices
83
-
84
- codebook_indices, = unpack(codebook_indices, packed_shape, '*')
85
- return codebook_indices
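
A minimal usage sketch for the CustomHubert wrapper above; the checkpoint and wav paths are placeholders, and torchaudio is assumed here only for loading (the module itself imports just torchaudio's resample):

    import torchaudio
    from hubert.pre_kmeans_hubert import CustomHubert

    hubert = CustomHubert(checkpoint_path="models/hubert_base_ls960.pt")  # placeholder checkpoint path
    wav, sr = torchaudio.load("speech.wav")                               # (channels, samples), placeholder file
    features = hubert(wav, input_sample_hz=sr)  # resampled to 16 kHz internally; default output_layer=9
    print(features.shape)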
spaces/CofAI/tv/public/index.html DELETED
@@ -1,325 +0,0 @@
1
- <html>
2
- <head>
3
- <title>☕ CofTV</title>
4
- <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/full.css" rel="stylesheet" type="text/css" />
5
- <!--<link href="https://vjs.zencdn.net/8.3.0/video-js.css" rel="stylesheet" />-->
6
- <script src="/mpegts.js"></script>
7
- </head>
8
- <body
9
- x-data="app()" x-init="init()"
10
- class="fixed inset-0 bg-[rgb(0,0,0)] flex flex-col w-full items-center justify-start">
11
- <div x-show="!enabled">Loading CofTV...</div>
12
-
13
- <div
14
- x-show="enabled && showToolbar"
15
- x-transition:enter="transition ease-out duration-100"
16
- x-transition:enter-start="opacity-0 -translate-y-8"
17
- x-transition:enter-end="opacity-100"
18
- x-transition:leave="transition ease-in duration-200"
19
- x-transition:leave-start="opacity-100"
20
- x-transition:leave-end="opacity-0 -translate-y-8"
21
- class="fixed w-full z-20 py-4 px-6 top-0 font-mono text-white flex flex-col lg:flex-row items-center justify-between space-x-1 bg-black bg-opacity-60"
22
- style="text-shadow: 0px 0px 3px #000000">
23
-
24
- <div class="flex text-xl space-x-2">
25
- <div class="text-xl">☕ CofTV </div>
26
- <div class="text-md">👉 Текущий канал:</div>
27
- <template x-for="chan in channels">
28
- <div
29
- class="text-xl mr-2"
30
- :class="chan.id === channel.id
31
- ? 'font-bold'
32
- : 'hover:underline opacity-60 hover:opacity-80 cursor-pointer'"
33
- x-on:click="window.location = `${window.location.origin}/?channel=${chan.id}`"
34
- x-text="chan.label">
35
- <div class="animate-ping absolute inline-flex h-4 w-4 rounded-full bg-red-400 opacity-75"></div>
36
- </div>
37
- </template>
38
- </div>
39
-
40
- <div class="flex justify-between space-x-6 items-center">
41
-
42
- <div class="flex items-center justify-center text-white opacity-100 space-x-2">
43
- <div>
44
- <svg xmlns="http://www.w3.org/2000/svg" width="24px" height="24px" viewBox="0 0 640 512"><path fill="currentColor" d="M96 128a128 128 0 1 1 256 0A128 128 0 1 1 96 128zM0 482.3C0 383.8 79.8 304 178.3 304h91.4C368.2 304 448 383.8 448 482.3c0 16.4-13.3 29.7-29.7 29.7H29.7C13.3 512 0 498.7 0 482.3zM609.3 512H471.4c5.4-9.4 8.6-20.3 8.6-32v-8c0-60.7-27.1-115.2-69.8-151.8c2.4-.1 4.7-.2 7.1-.2h61.4C567.8 320 640 392.2 640 481.3c0 17-13.8 30.7-30.7 30.7zM432 256c-31 0-59-12.6-79.3-32.9C372.4 196.5 384 163.6 384 128c0-26.8-6.6-52.1-18.3-74.3C384.3 40.1 407.2 32 432 32c61.9 0 112 50.1 112 112s-50.1 112-112 112z"/></svg>
45
- </div>
46
- <div x-text="channel.audience"></div>
47
- <div x-text="channel.audience > 1 ? 'viewers' : '🟢 Онлайн'"></div>
48
- </div>
49
-
50
- <div class="text-sm">(<a
51
- class="hover:underline"
52
- href="https://huggingface.co/facebook/musicgen-melody"
53
- target="_blank">musicgen-melody</a> + <a
54
- class="hover:underline"
55
- :href="channel.modelUrl"
56
- x-text="channel.model"
57
- target="_blank"></a>)</div>
58
-
59
- <div
60
- x-on:click="toggleAudio()"
61
- class="flex items-center justify-center text-white opacity-80 hover:opacity-100 cursor-pointer">
62
- <div x-show="muted">
63
- <svg aria-hidden="true" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="32px" height="32px"><path fill="currentColor" d="M215.03 71.05L126.06 160H24c-13.26 0-24 10.74-24 24v144c0 13.25 10.74 24 24 24h102.06l88.97 88.95c15.03 15.03 40.97 4.47 40.97-16.97V88.02c0-21.46-25.96-31.98-40.97-16.97zM461.64 256l45.64-45.64c6.3-6.3 6.3-16.52 0-22.82l-22.82-22.82c-6.3-6.3-16.52-6.3-22.82 0L416 210.36l-45.64-45.64c-6.3-6.3-16.52-6.3-22.82 0l-22.82 22.82c-6.3 6.3-6.3 16.52 0 22.82L370.36 256l-45.63 45.63c-6.3 6.3-6.3 16.52 0 22.82l22.82 22.82c6.3 6.3 16.52 6.3 22.82 0L416 301.64l45.64 45.64c6.3 6.3 16.52 6.3 22.82 0l22.82-22.82c6.3-6.3 6.3-16.52 0-22.82L461.64 256z" class=""></path></svg>
64
- </div>
65
- <div x-show="!muted">
66
- <svg aria-hidden="true" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 480 512" width="32px" height="32px"><path fill="currentColor" d="M215.03 71.05L126.06 160H24c-13.26 0-24 10.74-24 24v144c0 13.25 10.74 24 24 24h102.06l88.97 88.95c15.03 15.03 40.97 4.47 40.97-16.97V88.02c0-21.46-25.96-31.98-40.97-16.97zM480 256c0-63.53-32.06-121.94-85.77-156.24-11.19-7.14-26.03-3.82-33.12 7.46s-3.78 26.21 7.41 33.36C408.27 165.97 432 209.11 432 256s-23.73 90.03-63.48 115.42c-11.19 7.14-14.5 22.07-7.41 33.36 6.51 10.36 21.12 15.14 33.12 7.46C447.94 377.94 480 319.53 480 256zm-141.77-76.87c-11.58-6.33-26.19-2.16-32.61 9.45-6.39 11.61-2.16 26.2 9.45 32.61C327.98 228.28 336 241.63 336 256c0 14.38-8.02 27.72-20.92 34.81-11.61 6.41-15.84 21-9.45 32.61 6.43 11.66 21.05 15.8 32.61 9.45 28.23-15.55 45.77-45 45.77-76.88s-17.54-61.32-45.78-76.86z" class=""></path></svg>
67
- </div>
68
- </div>
69
- <div
70
- x-on:click="fullscreen()"
71
- class="text-white hover:text-white opacity-80 hover:opacity-100 cursor-pointer">
72
- <?xml version="1.0" ?><svg version="1.1" viewBox="0 0 14 14" width="24px" height="24px" xmlns="http://www.w3.org/2000/svg" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns" xmlns:xlink="http://www.w3.org/1999/xlink"><title/><desc/><defs/><g fill="none" fill-rule="evenodd" id="Page-1" stroke="none" stroke-width="1"><g fill="currentColor" id="Core" transform="translate(-215.000000, -257.000000)"><g id="fullscreen" transform="translate(215.000000, 257.000000)"><path d="M2,9 L0,9 L0,14 L5,14 L5,12 L2,12 L2,9 L2,9 Z M0,5 L2,5 L2,2 L5,2 L5,0 L0,0 L0,5 L0,5 Z M12,12 L9,12 L9,14 L14,14 L14,9 L12,9 L12,12 L12,12 Z M9,0 L9,2 L12,2 L12,5 L14,5 L14,0 L9,0 L9,0 Z" id="Shape"/></g></g></g></svg>
73
- </div>
74
- </div>
75
- </div>
76
- <div class="flex w-full">
77
- <video id="videoElement" muted autoplay class="aspect-video w-full"></video>
78
- <!--
79
- We probably want to display a nice logo or decoration somewhere
80
- <img src="/hf-logo.png" class="absolute mt-2 w-[16%]" />
81
- -->
82
- </div>
83
- <script>
84
- // disable analytics (we don't use VideoJS yet anyway)
85
- window.HELP_IMPROVE_VIDEOJS = false
86
- </script>
87
- <script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/cdn.min.js"></script>
88
- <script src="https://cdn.tailwindcss.com?plugins=forms,typography,aspect-ratio"></script>
89
- <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.2/iframeResizer.contentWindow.min.js"></script>
90
- <!--<script src="https://vjs.zencdn.net/8.3.0/video.min.js"></script>-->
91
- <script>
92
-
93
- function app() {
94
- return {
95
- enabled: false,
96
- channels: {
97
- /*
98
- legacy: {
99
- id: 'legacy',
100
- label: '#older',
101
- audience: 0,
102
- online: false,
103
- visible: false,
104
- url: 'https://jbilcke-hf-media-server.hf.space/live/legacy.flv',
105
- resolution: '576x320',
106
- model: 'zeroscope_v2_576w',
107
- modelUrl: 'https://huggingface.co/cerspense/zeroscope_v2_576w',
108
- },
109
- */
110
- /*
111
- hdtv: {
112
- id: 'hdtv',
113
- label: '#old',
114
- audience: 0,
115
- online: false,
116
- visible: true,
117
- url: 'https://jbilcke-hf-media-server.hf.space/live/hdtv.flv',
118
- resolution: '1024x576_8FPS',
119
- model: 'zeroscope_v2_XL',
120
- modelUrl: 'https://huggingface.co/cerspense/zeroscope_v2_XL',
121
- },
122
- */
123
- random: {
124
- id: 'random',
125
- label: 'Главный',
126
- audience: 0,
127
- online: false,
128
- visible: true,
129
- url: 'https://jbilcke-hf-media-server.hf.space/live/random.flv',
130
- resolution: '1024x576_24FPS',
131
- model: 'zeroscope_v2_XL',
132
- modelUrl: 'https://huggingface.co/cerspense/zeroscope_v2_XL',
133
- },
134
- comedy: {
135
- id: 'comedy',
136
- label: 'Интересный',
137
- audience: 0,
138
- online: false,
139
- visible: true,
140
- url: 'https://jbilcke-hf-media-server.hf.space/live/comedy.flv',
141
- resolution: '1024x576_24FPS',
142
- model: 'zeroscope_v2_XL',
143
- modelUrl: 'https://huggingface.co/cerspense/zeroscope_v2_XL',
144
- },
145
- documentary: {
146
- id: 'documentary',
147
- label: 'Документальный',
148
- audience: 0,
149
- online: false,
150
- visible: true,
151
- url: 'https://jbilcke-hf-media-server.hf.space/live/documentary.flv',
152
- resolution: '1024x576_24FPS',
153
- model: 'zeroscope_v2_XL',
154
- modelUrl: 'https://huggingface.co/cerspense/zeroscope_v2_XL',
155
- },
156
- },
157
- showToolbar: true,
158
- muted: true,
159
- initialized: false,
160
- activityTimeout: null,
161
- defaultChannelId: 'random',
162
- video: null,
163
- channel: {
164
- },
165
- wakeUp() {
166
- this.showToolbar = true
167
- clearTimeout(this.activityTimeout)
168
- this.activityTimeout = setTimeout(() => {
169
- this.showToolbar = false
170
- }, 1500);
171
- },
172
- toggleAudio() {
173
- if (this.video.muted) {
174
- this.video.muted = false
175
- this.muted = false
176
- } else {
177
- this.video.muted = true
178
- this.muted = true
179
- }
180
- },
181
- async checkAudience() {
182
- let audience = {}
183
- try {
184
- const res = await fetch('/stats')
185
- audience = await res.json()
186
- } catch (err) {
187
- console.log('failed to check the audience, something is wrong')
188
- }
189
-
190
- window.DEBUGME = Object.entries(this.channels)
191
- this.channels = Object.entries(this.channels).reduce((acc, [channel, data]) => ((console.log('debug:', {
192
- ...data,
193
- audience: audience[channel] || 0
194
- } ), {
195
- ...acc,
196
- [channel]: {
197
- ...data,
198
- audience: audience[channel] || 0
199
- }
200
- })), {})
201
- this.channel = this.channels[this.channel.id]
202
- },
203
- fullscreen() {
204
- if (this.video.requestFullscreen) {
205
- this.video.requestFullscreen();
206
- } else if (this.video.mozRequestFullScreen) {
207
- this.video.mozRequestFullScreen();
208
- } else if (this.video.webkitRequestFullscreen) {
209
- this.video.webkitRequestFullscreen();
210
- } else if (this.video.msRequestFullscreen) {
211
- this.video.msRequestFullscreen();
212
- }
213
- },
214
- init() {
215
- if (this.initialized) {
216
- console.log("already initialized")
217
- return
218
- }
219
- this.initialized = true
220
- console.log('Initializing CofTV')
221
-
222
- const urlParams = new URLSearchParams(window.location.search)
223
-
224
- const requestedChannelId = `${urlParams.get('channel') || 'random'}`
225
-
226
- this.enabled = true
227
- // this.enabled = `${urlParams.get('beta') || 'false'}` === 'true'
228
-
229
- if (!this.enabled) {
230
- return
231
- }
232
-
233
- this.video = document.getElementById('videoElement')
234
-
235
- const defaultChannel = this.channels[this.defaultChannelId]
236
-
237
- this.channel = this.channels[requestedChannelId] || defaultChannel
238
-
239
- console.log(`Selected channel: ${this.channel.label}`)
240
- console.log(`Stream URL: ${this.channel.url}`)
241
-
242
-
243
- const handleActivity = () => {
244
- this.wakeUp()
245
- }
246
- handleActivity()
247
-
248
- document.addEventListener("touchstart", handleActivity)
249
- document.addEventListener("touchmove", handleActivity)
250
- document.addEventListener("click", handleActivity)
251
- document.addEventListener("mousemove", handleActivity)
252
-
253
- this.checkAudience()
254
- setInterval(() => {
255
- this.checkAudience()
256
- }, 1000)
257
-
258
- // detect mute/unmute events
259
- this.video.addEventListener("mute", () => {
260
- this.muted = true
261
- })
262
- this.video.addEventListener("unmute", () => {
263
- this.muted = false
264
- })
265
-
266
- // when we move outside the video, we always hide the toolbar
267
- document.addEventListener("mouseleave", () => {
268
- clearTimeout(this.activityTimeout)
269
- this.showToolbar = false
270
- })
271
-
272
- // as a bonus, we also allow fullscreen on double click
273
- this.video.addEventListener('dblclick', () => {
274
- this.fullscreen()
275
- })
276
-
277
- // some devices such as the iPhone don't support MSE Live Playback
278
- if (mpegts.getFeatureList().mseLivePlayback) {
279
- var player = mpegts.createPlayer({
280
- type: 'flv', // could also be mpegts, m2ts, flv
281
- isLive: true,
282
- url: this.channel.url,
283
- })
284
- player.attachMediaElement(this.video)
285
-
286
- player.on(mpegts.Events.ERROR, function (err) {
287
- console.log('got an error:', err)
288
- if (err.type === mpegts.ErrorTypes.NETWORK_ERROR) {
289
- console.log('Network error')
290
- }
291
- });
292
-
293
- player.load()
294
-
295
- // due to an issue with our stream when the FFMPEG playlist ends,
296
- // the stream gets interrupted for ~1sec, which causes the frontend to hangs up
297
- // the following code tries to restart the page when that happens, but in the long term
298
- // we should fix the issue on the server side (fix our FFMPEG bash script)
299
- this.video.addEventListener('ended', function() {
300
- console.log('Stream ended, trying to reload...')
301
- setTimeout(() => {
302
- console.log('Reloading the page..')
303
- // Unloading and loading the source again isn't enough it seems
304
- // player.unload()
305
- // player.load()
306
- window.location.reload()
307
- }, 1200)
308
- }, false)
309
-
310
- // Handle autoplay restrictions.
311
- let promise = this.video.play()
312
- if (promise !== undefined) {
313
- this.video.addEventListener('click', () => {  // arrow function keeps "this" bound to the app object
314
- this.video.play()
315
- })
316
- }
317
-
318
- player.play()
319
- }
320
- }
321
- }
322
- }
323
- </script>
324
- </body>
325
- </html>
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/image_degradation/bsrgan_light.py DELETED
@@ -1,651 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import numpy as np
3
- import cv2
4
- import torch
5
-
6
- from functools import partial
7
- import random
8
- from scipy import ndimage
9
- import scipy
10
- import scipy.stats as ss
11
- from scipy.interpolate import interp2d
12
- from scipy.linalg import orth
13
- import albumentations
14
-
15
- import ldm.modules.image_degradation.utils_image as util
16
-
17
- """
18
- # --------------------------------------------
19
- # Super-Resolution
20
- # --------------------------------------------
21
- #
22
- # Kai Zhang ([email protected])
23
- # https://github.com/cszn
24
- # From 2019/03--2021/08
25
- # --------------------------------------------
26
- """
27
-
28
- def modcrop_np(img, sf):
29
- '''
30
- Args:
31
- img: numpy image, WxH or WxHxC
32
- sf: scale factor
33
- Return:
34
- cropped image
35
- '''
36
- w, h = img.shape[:2]
37
- im = np.copy(img)
38
- return im[:w - w % sf, :h - h % sf, ...]
39
-
40
-
41
- """
42
- # --------------------------------------------
43
- # anisotropic Gaussian kernels
44
- # --------------------------------------------
45
- """
46
-
47
-
48
- def analytic_kernel(k):
49
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
50
- k_size = k.shape[0]
51
- # Calculate the big kernels size
52
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
53
- # Loop over the small kernel to fill the big one
54
- for r in range(k_size):
55
- for c in range(k_size):
56
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
57
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
58
- crop = k_size // 2
59
- cropped_big_k = big_k[crop:-crop, crop:-crop]
60
- # Normalize to 1
61
- return cropped_big_k / cropped_big_k.sum()
62
-
63
-
64
- def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
65
- """ generate an anisotropic Gaussian kernel
66
- Args:
67
- ksize : e.g., 15, kernel size
68
- theta : [0, pi], rotation angle range
69
- l1 : [0.1,50], scaling of eigenvalues
70
- l2 : [0.1,l1], scaling of eigenvalues
71
- If l1 = l2, will get an isotropic Gaussian kernel.
72
- Returns:
73
- k : kernel
74
- """
75
-
76
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
77
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
78
- D = np.array([[l1, 0], [0, l2]])
79
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
80
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
81
-
82
- return k
83
-
84
-
85
- def gm_blur_kernel(mean, cov, size=15):
86
- center = size / 2.0 + 0.5
87
- k = np.zeros([size, size])
88
- for y in range(size):
89
- for x in range(size):
90
- cy = y - center + 1
91
- cx = x - center + 1
92
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
93
-
94
- k = k / np.sum(k)
95
- return k
96
-
97
-
98
- def shift_pixel(x, sf, upper_left=True):
99
- """shift pixel for super-resolution with different scale factors
100
- Args:
101
- x: WxHxC or WxH
102
- sf: scale factor
103
- upper_left: shift direction
104
- """
105
- h, w = x.shape[:2]
106
- shift = (sf - 1) * 0.5
107
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
108
- if upper_left:
109
- x1 = xv + shift
110
- y1 = yv + shift
111
- else:
112
- x1 = xv - shift
113
- y1 = yv - shift
114
-
115
- x1 = np.clip(x1, 0, w - 1)
116
- y1 = np.clip(y1, 0, h - 1)
117
-
118
- if x.ndim == 2:
119
- x = interp2d(xv, yv, x)(x1, y1)
120
- if x.ndim == 3:
121
- for i in range(x.shape[-1]):
122
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
123
-
124
- return x
125
-
126
-
127
- def blur(x, k):
128
- '''
129
- x: image, NxcxHxW
130
- k: kernel, Nx1xhxw
131
- '''
132
- n, c = x.shape[:2]
133
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
134
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
135
- k = k.repeat(1, c, 1, 1)
136
- k = k.view(-1, 1, k.shape[2], k.shape[3])
137
- x = x.view(1, -1, x.shape[2], x.shape[3])
138
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
139
- x = x.view(n, c, x.shape[2], x.shape[3])
140
-
141
- return x
142
-
143
-
144
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
145
- """"
146
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
147
- # Kai Zhang
148
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
149
- # max_var = 2.5 * sf
150
- """
151
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
152
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
153
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
154
- theta = np.random.rand() * np.pi # random theta
155
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
156
-
157
- # Set COV matrix using Lambdas and Theta
158
- LAMBDA = np.diag([lambda_1, lambda_2])
159
- Q = np.array([[np.cos(theta), -np.sin(theta)],
160
- [np.sin(theta), np.cos(theta)]])
161
- SIGMA = Q @ LAMBDA @ Q.T
162
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
163
-
164
- # Set expectation position (shifting kernel for aligned image)
165
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
166
- MU = MU[None, None, :, None]
167
-
168
- # Create meshgrid for Gaussian
169
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
170
- Z = np.stack([X, Y], 2)[:, :, :, None]
171
-
172
- # Calcualte Gaussian for every pixel of the kernel
173
- ZZ = Z - MU
174
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
175
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
176
-
177
- # shift the kernel so it will be centered
178
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
179
-
180
- # Normalize the kernel and return
181
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
182
- kernel = raw_kernel / np.sum(raw_kernel)
183
- return kernel
184
-
185
-
186
- def fspecial_gaussian(hsize, sigma):
187
- hsize = [hsize, hsize]
188
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
189
- std = sigma
190
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
191
- arg = -(x * x + y * y) / (2 * std * std)
192
- h = np.exp(arg)
193
- h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: the bare scipy.finfo alias was removed from newer SciPy releases
194
- sumh = h.sum()
195
- if sumh != 0:
196
- h = h / sumh
197
- return h
198
-
199
-
200
- def fspecial_laplacian(alpha):
201
- alpha = max([0, min([alpha, 1])])
202
- h1 = alpha / (alpha + 1)
203
- h2 = (1 - alpha) / (alpha + 1)
204
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
205
- h = np.array(h)
206
- return h
207
-
208
-
209
- def fspecial(filter_type, *args, **kwargs):
210
- '''
211
- python code from:
212
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
213
- '''
214
- if filter_type == 'gaussian':
215
- return fspecial_gaussian(*args, **kwargs)
216
- if filter_type == 'laplacian':
217
- return fspecial_laplacian(*args, **kwargs)
218
-
219
-
220
- """
221
- # --------------------------------------------
222
- # degradation models
223
- # --------------------------------------------
224
- """
225
-
226
-
227
- def bicubic_degradation(x, sf=3):
228
- '''
229
- Args:
230
- x: HxWxC image, [0, 1]
231
- sf: down-scale factor
232
- Return:
233
- bicubicly downsampled LR image
234
- '''
235
- x = util.imresize_np(x, scale=1 / sf)
236
- return x
237
-
238
-
239
- def srmd_degradation(x, k, sf=3):
240
- ''' blur + bicubic downsampling
241
- Args:
242
- x: HxWxC image, [0, 1]
243
- k: hxw, double
244
- sf: down-scale factor
245
- Return:
246
- downsampled LR image
247
- Reference:
248
- @inproceedings{zhang2018learning,
249
- title={Learning a single convolutional super-resolution network for multiple degradations},
250
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
251
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
252
- pages={3262--3271},
253
- year={2018}
254
- }
255
- '''
256
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
257
- x = bicubic_degradation(x, sf=sf)
258
- return x
259
-
260
-
261
- def dpsr_degradation(x, k, sf=3):
262
- ''' bicubic downsampling + blur
263
- Args:
264
- x: HxWxC image, [0, 1]
265
- k: hxw, double
266
- sf: down-scale factor
267
- Return:
268
- downsampled LR image
269
- Reference:
270
- @inproceedings{zhang2019deep,
271
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
272
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
273
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
274
- pages={1671--1681},
275
- year={2019}
276
- }
277
- '''
278
- x = bicubic_degradation(x, sf=sf)
279
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
280
- return x
281
-
282
-
283
- def classical_degradation(x, k, sf=3):
284
- ''' blur + downsampling
285
- Args:
286
- x: HxWxC image, [0, 1]/[0, 255]
287
- k: hxw, double
288
- sf: down-scale factor
289
- Return:
290
- downsampled LR image
291
- '''
292
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
293
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
294
- st = 0
295
- return x[st::sf, st::sf, ...]
296
-
297
-
298
- def add_sharpening(img, weight=0.5, radius=50, threshold=10):
299
- """USM sharpening. borrowed from real-ESRGAN
300
- Input image: I; Blurry image: B.
301
- 1. K = I + weight * (I - B)
302
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
303
- 3. Blur mask:
304
- 4. Out = Mask * K + (1 - Mask) * I
305
- Args:
306
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
307
- weight (float): Sharp weight. Default: 1.
308
- radius (float): Kernel size of Gaussian blur. Default: 50.
309
- threshold (int):
310
- """
311
- if radius % 2 == 0:
312
- radius += 1
313
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
314
- residual = img - blur
315
- mask = np.abs(residual) * 255 > threshold
316
- mask = mask.astype('float32')
317
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
318
-
319
- K = img + weight * residual
320
- K = np.clip(K, 0, 1)
321
- return soft_mask * K + (1 - soft_mask) * img
322
-
323
-
324
- def add_blur(img, sf=4):
325
- wd2 = 4.0 + sf
326
- wd = 2.0 + 0.2 * sf
327
-
328
- wd2 = wd2/4
329
- wd = wd/4
330
-
331
- if random.random() < 0.5:
332
- l1 = wd2 * random.random()
333
- l2 = wd2 * random.random()
334
- k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
335
- else:
336
- k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
337
- img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
338
-
339
- return img
340
-
341
-
342
- def add_resize(img, sf=4):
343
- rnum = np.random.rand()
344
- if rnum > 0.8: # up
345
- sf1 = random.uniform(1, 2)
346
- elif rnum < 0.7: # down
347
- sf1 = random.uniform(0.5 / sf, 1)
348
- else:
349
- sf1 = 1.0
350
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
351
- img = np.clip(img, 0.0, 1.0)
352
-
353
- return img
354
-
355
-
356
- # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
357
- # noise_level = random.randint(noise_level1, noise_level2)
358
- # rnum = np.random.rand()
359
- # if rnum > 0.6: # add color Gaussian noise
360
- # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
361
- # elif rnum < 0.4: # add grayscale Gaussian noise
362
- # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
363
- # else: # add noise
364
- # L = noise_level2 / 255.
365
- # D = np.diag(np.random.rand(3))
366
- # U = orth(np.random.rand(3, 3))
367
- # conv = np.dot(np.dot(np.transpose(U), D), U)
368
- # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
369
- # img = np.clip(img, 0.0, 1.0)
370
- # return img
371
-
372
- def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
373
- noise_level = random.randint(noise_level1, noise_level2)
374
- rnum = np.random.rand()
375
- if rnum > 0.6: # add color Gaussian noise
376
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
377
- elif rnum < 0.4: # add grayscale Gaussian noise
378
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
379
- else: # add noise
380
- L = noise_level2 / 255.
381
- D = np.diag(np.random.rand(3))
382
- U = orth(np.random.rand(3, 3))
383
- conv = np.dot(np.dot(np.transpose(U), D), U)
384
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
385
- img = np.clip(img, 0.0, 1.0)
386
- return img
387
-
388
-
389
- def add_speckle_noise(img, noise_level1=2, noise_level2=25):
390
- noise_level = random.randint(noise_level1, noise_level2)
391
- img = np.clip(img, 0.0, 1.0)
392
- rnum = random.random()
393
- if rnum > 0.6:
394
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
395
- elif rnum < 0.4:
396
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
397
- else:
398
- L = noise_level2 / 255.
399
- D = np.diag(np.random.rand(3))
400
- U = orth(np.random.rand(3, 3))
401
- conv = np.dot(np.dot(np.transpose(U), D), U)
402
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
403
- img = np.clip(img, 0.0, 1.0)
404
- return img
405
-
406
-
407
- def add_Poisson_noise(img):
408
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
409
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
410
- if random.random() < 0.5:
411
- img = np.random.poisson(img * vals).astype(np.float32) / vals
412
- else:
413
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
414
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
415
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
416
- img += noise_gray[:, :, np.newaxis]
417
- img = np.clip(img, 0.0, 1.0)
418
- return img
419
-
420
-
421
- def add_JPEG_noise(img):
422
- quality_factor = random.randint(80, 95)
423
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
424
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
425
- img = cv2.imdecode(encimg, 1)
426
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
427
- return img
428
-
429
-
430
- def random_crop(lq, hq, sf=4, lq_patchsize=64):
431
- h, w = lq.shape[:2]
432
- rnd_h = random.randint(0, h - lq_patchsize)
433
- rnd_w = random.randint(0, w - lq_patchsize)
434
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
435
-
436
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
437
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
438
- return lq, hq
439
-
440
-
441
- def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
442
- """
443
- This is the degradation model of BSRGAN from the paper
444
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
445
- ----------
446
- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
447
- sf: scale factor
448
- isp_model: camera ISP model
449
- Returns
450
- -------
451
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
452
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
453
- """
454
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
455
- sf_ori = sf
456
-
457
- h1, w1 = img.shape[:2]
458
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
459
- h, w = img.shape[:2]
460
-
461
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
462
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
463
-
464
- hq = img.copy()
465
-
466
- if sf == 4 and random.random() < scale2_prob: # downsample1
467
- if np.random.rand() < 0.5:
468
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
469
- interpolation=random.choice([1, 2, 3]))
470
- else:
471
- img = util.imresize_np(img, 1 / 2, True)
472
- img = np.clip(img, 0.0, 1.0)
473
- sf = 2
474
-
475
- shuffle_order = random.sample(range(7), 7)
476
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
477
- if idx1 > idx2: # keep downsample3 last
478
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
479
-
480
- for i in shuffle_order:
481
-
482
- if i == 0:
483
- img = add_blur(img, sf=sf)
484
-
485
- elif i == 1:
486
- img = add_blur(img, sf=sf)
487
-
488
- elif i == 2:
489
- a, b = img.shape[1], img.shape[0]
490
- # downsample2
491
- if random.random() < 0.75:
492
- sf1 = random.uniform(1, 2 * sf)
493
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
494
- interpolation=random.choice([1, 2, 3]))
495
- else:
496
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
497
- k_shifted = shift_pixel(k, sf)
498
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
499
- img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
500
- img = img[0::sf, 0::sf, ...] # nearest downsampling
501
- img = np.clip(img, 0.0, 1.0)
502
-
503
- elif i == 3:
504
- # downsample3
505
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
506
- img = np.clip(img, 0.0, 1.0)
507
-
508
- elif i == 4:
509
- # add Gaussian noise
510
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
511
-
512
- elif i == 5:
513
- # add JPEG noise
514
- if random.random() < jpeg_prob:
515
- img = add_JPEG_noise(img)
516
-
517
- elif i == 6:
518
- # add processed camera sensor noise
519
- if random.random() < isp_prob and isp_model is not None:
520
- with torch.no_grad():
521
- img, hq = isp_model.forward(img.copy(), hq)
522
-
523
- # add final JPEG compression noise
524
- img = add_JPEG_noise(img)
525
-
526
- # random crop
527
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
528
-
529
- return img, hq
530
-
531
-
532
- # todo no isp_model?
533
- def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
534
- """
535
- This is the degradation model of BSRGAN from the paper
536
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
537
- ----------
538
- sf: scale factor
539
- isp_model: camera ISP model
540
- Returns
541
- -------
542
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
543
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
544
- """
545
- image = util.uint2single(image)
546
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
547
- sf_ori = sf
548
-
549
- h1, w1 = image.shape[:2]
550
- image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
551
- h, w = image.shape[:2]
552
-
553
- hq = image.copy()
554
-
555
- if sf == 4 and random.random() < scale2_prob: # downsample1
556
- if np.random.rand() < 0.5:
557
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
558
- interpolation=random.choice([1, 2, 3]))
559
- else:
560
- image = util.imresize_np(image, 1 / 2, True)
561
- image = np.clip(image, 0.0, 1.0)
562
- sf = 2
563
-
564
- shuffle_order = random.sample(range(7), 7)
565
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
566
- if idx1 > idx2: # keep downsample3 last
567
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
568
-
569
- for i in shuffle_order:
570
-
571
- if i == 0:
572
- image = add_blur(image, sf=sf)
573
-
574
- # elif i == 1:
575
- # image = add_blur(image, sf=sf)
576
-
577
- if i == 0:
578
- pass
579
-
580
- elif i == 2:
581
- a, b = image.shape[1], image.shape[0]
582
- # downsample2
583
- if random.random() < 0.8:
584
- sf1 = random.uniform(1, 2 * sf)
585
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
586
- interpolation=random.choice([1, 2, 3]))
587
- else:
588
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
589
- k_shifted = shift_pixel(k, sf)
590
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
591
- image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
592
- image = image[0::sf, 0::sf, ...] # nearest downsampling
593
-
594
- image = np.clip(image, 0.0, 1.0)
595
-
596
- elif i == 3:
597
- # downsample3
598
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
599
- image = np.clip(image, 0.0, 1.0)
600
-
601
- elif i == 4:
602
- # add Gaussian noise
603
- image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
604
-
605
- elif i == 5:
606
- # add JPEG noise
607
- if random.random() < jpeg_prob:
608
- image = add_JPEG_noise(image)
609
- #
610
- # elif i == 6:
611
- # # add processed camera sensor noise
612
- # if random.random() < isp_prob and isp_model is not None:
613
- # with torch.no_grad():
614
- # img, hq = isp_model.forward(img.copy(), hq)
615
-
616
- # add final JPEG compression noise
617
- image = add_JPEG_noise(image)
618
- image = util.single2uint(image)
619
- if up:
620
- image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC) # todo: random, as above? want to condition on it then
621
- example = {"image": image}
622
- return example
623
-
624
-
625
-
626
-
627
- if __name__ == '__main__':
628
- print("hey")
629
- img = util.imread_uint('utils/test.png', 3)
630
- img = img[:448, :448]
631
- h = img.shape[0] // 4
632
- print("resizing to", h)
633
- sf = 4
634
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
635
- for i in range(20):
636
- print(i)
637
- img_hq = img
638
- img_lq = deg_fn(img)["image"]
639
- img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
640
- print(img_lq)
641
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
642
- print(img_lq.shape)
643
- print("bicubic", img_lq_bicubic.shape)
644
- print(img_hq.shape)
645
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
646
- interpolation=0)
647
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
648
- (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
649
- interpolation=0)
650
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
651
- util.imsave(img_concat, str(i) + '.png')
spaces/DEBO-PROJECT/DEBO-V1/modules/whisper_modules.py DELETED
@@ -1,75 +0,0 @@
1
- import openai
2
- import os
3
- import random
4
-
5
- from langchain.prompts import PromptTemplate
6
- from modules.gpt_modules import gpt_call
7
- # from dotenv import dotenv_values
8
-
9
- # config = dotenv_values(".env")
10
-
11
- # if config:
12
- # openai.organization = config.get('OPENAI_ORGANIZATION')
13
- # openai.api_key = config.get('OPENAI_API_KEY')
14
- # else:
15
- # openai.organization = st.secrets['OPENAI_ORGANIZATION'] #config.get('OPENAI_ORGANIZATION')
16
- # openai.api_key = st.secrets['OPENAI_API_KEY'] #config.get('OPENAI_API_KEY')
17
-
18
-
19
- def debate_in_sound(api_key, audio):
20
- os.rename(audio, audio + '.wav')
21
- file = open(audio + '.wav', "rb")
22
-
23
- openai.api_key = api_key
24
-
25
- # user_words
26
- user_prompt = openai.Audio.transcribe("whisper-1", file).text
27
-
28
- print("**************************************")
29
- print("user_audio transcription", user_prompt)
30
- print("**************************************")
31
-
32
- # Testing Prompt
33
- debate_subject = "In 2050, AI robots are able to replicate the appearance, conversation, and reaction to emotions of human beings. However, their intelligence still does not allow them to sense emotions and feelings such as pain, happiness, joy, and etc."
34
-
35
- debate_role = [
36
- "pro side",
37
- "con side",
38
- ]
39
- user_debate_role = random.choice(debate_role)
40
- bot_debate_role = "".join([role for role in debate_role if role != user_debate_role])
41
-
42
- debate_preset = "\n".join([
43
- "Debate Rules: ",
44
- "1) This debate will be divided into pro and con",
45
- "2) You must counter user's arguments",
46
- "3) Answer logically with an introduction, body, and conclusion.\n", #add this one.
47
- "User debate role: " + user_debate_role,
48
- "Bot debate roles: " + bot_debate_role + "\n",
49
- "Debate subject: " + debate_subject
50
- ])
51
-
52
- prompt_template = PromptTemplate(
53
- input_variables=["prompt"],
54
- template="\n".join([
55
- debate_preset, #persona
56
- "User: {prompt}",
57
- "Bot: "
58
- ])
59
- )
60
- bot_prompt = prompt_template.format(
61
- prompt=user_prompt
62
- )
63
- response = gpt_call(api_key, bot_prompt)
64
-
65
- return response
66
-
67
-
68
- def whisper_transcribe(api_key, audio_file):
69
- openai.api_key = api_key
70
-
71
- audio_file = open("audio/audio.wav", "rb")  # note: the audio_file argument is ignored; this helper reads a fixed recording path
72
- result = openai.Audio.transcribe("whisper-1", audio_file).text
73
- audio_file.close()
74
-
75
- return result
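
A hedged usage sketch for debate_in_sound above; the API key and recording path are placeholders, and the function appends ".wav" to whatever path it is given before transcribing it:

    from modules.whisper_modules import debate_in_sound

    api_key = "sk-..."                             # placeholder OpenAI key
    reply = debate_in_sound(api_key, "recording")  # file "recording" is renamed to "recording.wav"
    print(reply)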
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/helpers.py DELETED
@@ -1,959 +0,0 @@
1
- """
2
- Defines helper methods useful for loading and caching Interface examples.
3
- """
4
- from __future__ import annotations
5
-
6
- import ast
7
- import csv
8
- import inspect
9
- import os
10
- import shutil
11
- import subprocess
12
- import tempfile
13
- import threading
14
- import warnings
15
- from pathlib import Path
16
- from typing import TYPE_CHECKING, Any, Callable, Iterable, Literal
17
-
18
- import matplotlib.pyplot as plt
19
- import numpy as np
20
- import PIL
21
- import PIL.Image
22
- from gradio_client import utils as client_utils
23
- from gradio_client.documentation import document, set_documentation_group
24
-
25
- from gradio import components, processing_utils, routes, utils
26
- from gradio.context import Context
27
- from gradio.flagging import CSVLogger
28
-
29
- if TYPE_CHECKING: # Only import for type checking (to avoid circular imports).
30
- from gradio.blocks import Block
31
- from gradio.components import IOComponent
32
-
33
- CACHED_FOLDER = "gradio_cached_examples"
34
- LOG_FILE = "log.csv"
35
-
36
- set_documentation_group("helpers")
37
-
38
-
39
- def create_examples(
40
- examples: list[Any] | list[list[Any]] | str,
41
- inputs: IOComponent | list[IOComponent],
42
- outputs: IOComponent | list[IOComponent] | None = None,
43
- fn: Callable | None = None,
44
- cache_examples: bool = False,
45
- examples_per_page: int = 10,
46
- _api_mode: bool = False,
47
- label: str | None = None,
48
- elem_id: str | None = None,
49
- run_on_click: bool = False,
50
- preprocess: bool = True,
51
- postprocess: bool = True,
52
- api_name: str | None | Literal[False] = False,
53
- batch: bool = False,
54
- ):
55
- """Top-level synchronous function that creates Examples. Provided for backwards compatibility, i.e. so that gr.Examples(...) can be used to create the Examples component."""
56
- examples_obj = Examples(
57
- examples=examples,
58
- inputs=inputs,
59
- outputs=outputs,
60
- fn=fn,
61
- cache_examples=cache_examples,
62
- examples_per_page=examples_per_page,
63
- _api_mode=_api_mode,
64
- label=label,
65
- elem_id=elem_id,
66
- run_on_click=run_on_click,
67
- preprocess=preprocess,
68
- postprocess=postprocess,
69
- api_name=api_name,
70
- batch=batch,
71
- _initiated_directly=False,
72
- )
73
- client_utils.synchronize_async(examples_obj.create)
74
- return examples_obj
75
-
76
-
77
- @document()
78
- class Examples:
79
- """
80
- This class is a wrapper over the Dataset component and can be used to create Examples
81
- for Blocks / Interfaces. Populates the Dataset component with examples and
82
- assigns event listener so that clicking on an example populates the input/output
83
- components. Optionally handles example caching for fast inference.
84
-
85
- Demos: blocks_inputs, fake_gan
86
- Guides: more-on-examples-and-flagging, using-hugging-face-integrations, image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan
87
- """
88
-
89
- def __init__(
90
- self,
91
- examples: list[Any] | list[list[Any]] | str,
92
- inputs: IOComponent | list[IOComponent],
93
- outputs: IOComponent | list[IOComponent] | None = None,
94
- fn: Callable | None = None,
95
- cache_examples: bool = False,
96
- examples_per_page: int = 10,
97
- _api_mode: bool = False,
98
- label: str | None = "Examples",
99
- elem_id: str | None = None,
100
- run_on_click: bool = False,
101
- preprocess: bool = True,
102
- postprocess: bool = True,
103
- api_name: str | None | Literal[False] = False,
104
- batch: bool = False,
105
- _initiated_directly: bool = True,
106
- ):
107
- """
108
- Parameters:
109
- examples: example inputs that can be clicked to populate specific components. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs.
110
- inputs: the component or list of components corresponding to the examples
111
- outputs: optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache` is True.
112
- fn: optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache` is True.
113
- cache_examples: if True, caches examples for fast runtime. If True, then `fn` and `outputs` must be provided. If `fn` is a generator function, then the last yielded value will be used as the output.
114
- examples_per_page: how many examples to show per page.
115
- label: the label to use for the examples component (by default, "Examples")
116
- elem_id: an optional string that is assigned as the id of this component in the HTML DOM.
117
- run_on_click: if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.
118
- preprocess: if True, preprocesses the example input before running the prediction function and caching the output. Only applies if cache_examples is True.
119
- postprocess: if True, postprocesses the example output after running the prediction function and before caching. Only applies if cache_examples is True.
120
- api_name: Defines how the event associated with clicking on the examples appears in the API docs. Can be a string, None, or False. If False (default), the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
121
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. Used only if cache_examples is True.
122
- """
123
- if _initiated_directly:
124
- warnings.warn(
125
- "Please use gr.Examples(...) instead of gr.examples.Examples(...) to create the Examples.",
126
- )
127
-
128
- if cache_examples and (fn is None or outputs is None):
129
- raise ValueError("If caching examples, `fn` and `outputs` must be provided")
130
-
131
- if not isinstance(inputs, list):
132
- inputs = [inputs]
133
- if outputs and not isinstance(outputs, list):
134
- outputs = [outputs]
135
-
136
- working_directory = Path().absolute()
137
-
138
- if examples is None:
139
- raise ValueError("The parameter `examples` cannot be None")
140
- elif isinstance(examples, list) and (
141
- len(examples) == 0 or isinstance(examples[0], list)
142
- ):
143
- pass
144
- elif (
145
- isinstance(examples, list) and len(inputs) == 1
146
- ): # If there is only one input component, examples can be provided as a regular list instead of a list of lists
147
- examples = [[e] for e in examples]
148
- elif isinstance(examples, str):
149
- if not Path(examples).exists():
150
- raise FileNotFoundError(
151
- f"Could not find examples directory: {examples}"
152
- )
153
- working_directory = examples
154
- if not (Path(examples) / LOG_FILE).exists():
155
- if len(inputs) == 1:
156
- examples = [[e] for e in os.listdir(examples)]
157
- else:
158
- raise FileNotFoundError(
159
- "Could not find log file (required for multiple inputs): "
160
- + LOG_FILE
161
- )
162
- else:
163
- with open(Path(examples) / LOG_FILE) as logs:
164
- examples = list(csv.reader(logs))
165
- examples = [
166
- examples[i][: len(inputs)] for i in range(1, len(examples))
167
- ] # remove header and unnecessary columns
168
-
169
- else:
170
- raise ValueError(
171
- "The parameter `examples` must either be a string directory or a list"
172
- "(if there is only 1 input component) or (more generally), a nested "
173
- "list, where each sublist represents a set of inputs."
174
- )
175
-
176
- input_has_examples = [False] * len(inputs)
177
- for example in examples:
178
- for idx, example_for_input in enumerate(example):
179
- if example_for_input is not None:
180
- try:
181
- input_has_examples[idx] = True
182
- except IndexError:
183
- pass # If there are more example components than inputs, ignore. This can sometimes be intentional (e.g. loading from a log file where outputs and timestamps are also logged)
184
-
185
- inputs_with_examples = [
186
- inp for (inp, keep) in zip(inputs, input_has_examples) if keep
187
- ]
188
- non_none_examples = [
189
- [ex for (ex, keep) in zip(example, input_has_examples) if keep]
190
- for example in examples
191
- ]
192
-
193
- self.examples = examples
194
- self.non_none_examples = non_none_examples
195
- self.inputs = inputs
196
- self.inputs_with_examples = inputs_with_examples
197
- self.outputs = outputs
198
- self.fn = fn
199
- self.cache_examples = cache_examples
200
- self._api_mode = _api_mode
201
- self.preprocess = preprocess
202
- self.postprocess = postprocess
203
- self.api_name = api_name
204
- self.batch = batch
205
-
206
- with utils.set_directory(working_directory):
207
- self.processed_examples = [
208
- [
209
- component.postprocess(sample)
210
- for component, sample in zip(inputs, example)
211
- ]
212
- for example in examples
213
- ]
214
- self.non_none_processed_examples = [
215
- [ex for (ex, keep) in zip(example, input_has_examples) if keep]
216
- for example in self.processed_examples
217
- ]
218
- if cache_examples:
219
- for example in self.examples:
220
- if len([ex for ex in example if ex is not None]) != len(self.inputs):
221
- warnings.warn(
222
- "Examples are being cached but not all input components have "
223
- "example values. This may result in an exception being thrown by "
224
- "your function. If you do get an error while caching examples, make "
225
- "sure all of your inputs have example values for all of your examples "
226
- "or you provide default values for those particular parameters in your function."
227
- )
228
- break
229
-
230
- from gradio import components
231
-
232
- with utils.set_directory(working_directory):
233
- self.dataset = components.Dataset(
234
- components=inputs_with_examples,
235
- samples=non_none_examples,
236
- type="index",
237
- label=label,
238
- samples_per_page=examples_per_page,
239
- elem_id=elem_id,
240
- )
241
-
242
- self.cached_folder = Path(CACHED_FOLDER) / str(self.dataset._id)
243
- self.cached_file = Path(self.cached_folder) / "log.csv"
244
- self.cache_examples = cache_examples
245
- self.run_on_click = run_on_click
246
-
247
- async def create(self) -> None:
248
- """Caches the examples if self.cache_examples is True and creates the Dataset
249
- component to hold the examples"""
250
-
251
- async def load_example(example_id):
252
- if self.cache_examples:
253
- processed_example = self.non_none_processed_examples[
254
- example_id
255
- ] + await self.load_from_cache(example_id)
256
- else:
257
- processed_example = self.non_none_processed_examples[example_id]
258
- return utils.resolve_singleton(processed_example)
259
-
260
- if Context.root_block:
261
- if self.cache_examples and self.outputs:
262
- targets = self.inputs_with_examples + self.outputs
263
- else:
264
- targets = self.inputs_with_examples
265
- load_input_event = self.dataset.click(
266
- load_example,
267
- inputs=[self.dataset],
268
- outputs=targets, # type: ignore
269
- show_progress="hidden",
270
- postprocess=False,
271
- queue=False,
272
- api_name=self.api_name, # type: ignore
273
- )
274
- if self.run_on_click and not self.cache_examples:
275
- if self.fn is None:
276
- raise ValueError("Cannot run_on_click if no function is provided")
277
- load_input_event.then(
278
- self.fn,
279
- inputs=self.inputs, # type: ignore
280
- outputs=self.outputs, # type: ignore
281
- )
282
-
283
- if self.cache_examples:
284
- await self.cache()
285
-
286
- async def cache(self) -> None:
287
- """
288
- Caches all of the examples so that their predictions can be shown immediately.
289
- """
290
- if Path(self.cached_file).exists():
291
- print(
292
- f"Using cache from '{utils.abspath(self.cached_folder)}' directory. If method or examples have changed since last caching, delete this folder to clear cache.\n"
293
- )
294
- else:
295
- if Context.root_block is None:
296
- raise ValueError("Cannot cache examples if not in a Blocks context")
297
-
298
- print(f"Caching examples at: '{utils.abspath(self.cached_folder)}'")
299
- cache_logger = CSVLogger()
300
-
301
- if inspect.isgeneratorfunction(self.fn):
302
-
303
- def get_final_item(args): # type: ignore
304
- x = None
305
- for x in self.fn(args): # noqa: B007 # type: ignore
306
- pass
307
- return x
308
-
309
- fn = get_final_item
310
- elif inspect.isasyncgenfunction(self.fn):
311
-
312
- async def get_final_item(args):
313
- x = None
314
- async for x in self.fn(args): # noqa: B007 # type: ignore
315
- pass
316
- return x
317
-
318
- fn = get_final_item
319
- else:
320
- fn = self.fn
321
-
322
- # create a fake dependency to process the examples and get the predictions
323
- dependency, fn_index = Context.root_block.set_event_trigger(
324
- event_name="fake_event",
325
- fn=fn,
326
- inputs=self.inputs_with_examples, # type: ignore
327
- outputs=self.outputs, # type: ignore
328
- preprocess=self.preprocess and not self._api_mode,
329
- postprocess=self.postprocess and not self._api_mode,
330
- batch=self.batch,
331
- )
332
-
333
- assert self.outputs is not None
334
- cache_logger.setup(self.outputs, self.cached_folder)
335
- for example_id, _ in enumerate(self.examples):
336
- print(f"Caching example {example_id + 1}/{len(self.examples)}")
337
- processed_input = self.processed_examples[example_id]
338
- if self.batch:
339
- processed_input = [[value] for value in processed_input]
340
- with utils.MatplotlibBackendMananger():
341
- prediction = await Context.root_block.process_api(
342
- fn_index=fn_index,
343
- inputs=processed_input,
344
- request=None,
345
- state={},
346
- )
347
- output = prediction["data"]
348
- if self.batch:
349
- output = [value[0] for value in output]
350
- cache_logger.flag(output)
351
- # Remove the "fake_event" to prevent bugs in loading interfaces from spaces
352
- Context.root_block.dependencies.remove(dependency)
353
- Context.root_block.fns.pop(fn_index)
354
- print("Caching complete\n")
355
-
356
- async def load_from_cache(self, example_id: int) -> list[Any]:
357
- """Loads a particular cached example for the interface.
358
- Parameters:
359
- example_id: The id of the example to process (zero-indexed).
360
- """
361
- with open(self.cached_file, encoding="utf-8") as cache:
362
- examples = list(csv.reader(cache))
363
- example = examples[example_id + 1] # +1 to adjust for header
364
- output = []
365
- assert self.outputs is not None
366
- for component, value in zip(self.outputs, example):
367
- value_to_use = value
368
- try:
369
- value_as_dict = ast.literal_eval(value)
370
- # File components that output multiple files get saved as a python list
371
- # need to pass the parsed list to serialize
372
- # TODO: Better file serialization in 4.0
373
- if isinstance(value_as_dict, list) and isinstance(
374
- component, components.File
375
- ):
376
- value_to_use = value_as_dict
377
- assert utils.is_update(value_as_dict)
378
- output.append(value_as_dict)
379
- except (ValueError, TypeError, SyntaxError, AssertionError):
380
- output.append(
381
- component.serialize(
382
- value_to_use,
383
- self.cached_folder,
384
- )
385
- )
386
- return output
387
-
388
-
389
- class TrackedIterable:
390
- def __init__(
391
- self,
392
- iterable: Iterable | None,
393
- index: int | None,
394
- length: int | None,
395
- desc: str | None,
396
- unit: str | None,
397
- _tqdm=None,
398
- progress: float | None = None,
399
- ) -> None:
400
- self.iterable = iterable
401
- self.index = index
402
- self.length = length
403
- self.desc = desc
404
- self.unit = unit
405
- self._tqdm = _tqdm
406
- self.progress = progress
407
-
408
-
409
- @document("__call__", "tqdm")
410
- class Progress(Iterable):
411
- """
412
- The Progress class provides a custom progress tracker that is used in a function signature.
413
- To attach a Progress tracker to a function, simply add a parameter right after the input parameters that has a default value set to a `gradio.Progress()` instance.
414
- The Progress tracker can then be updated in the function by calling the Progress object or using the `tqdm` method on an Iterable.
415
- The Progress tracker is currently only available with `queue()`.
416
- Example:
417
- import gradio as gr
418
- import time
419
- def my_function(x, progress=gr.Progress()):
420
- progress(0, desc="Starting...")
421
- time.sleep(1)
422
- for i in progress.tqdm(range(100)):
423
- time.sleep(0.1)
424
- return x
425
- gr.Interface(my_function, gr.Textbox(), gr.Textbox()).queue().launch()
426
- Demos: progress
427
- """
428
-
429
- def __init__(
430
- self,
431
- track_tqdm: bool = False,
432
- _callback: Callable | None = None, # for internal use only
433
- _event_id: str | None = None,
434
- ):
435
- """
436
- Parameters:
437
- track_tqdm: If True, the Progress object will track any tqdm.tqdm iterations with the tqdm library in the function.
438
- """
439
- self.track_tqdm = track_tqdm
440
- self._callback = _callback
441
- self._event_id = _event_id
442
- self.iterables: list[TrackedIterable] = []
443
-
444
- def __len__(self):
445
- return self.iterables[-1].length
446
-
447
- def __iter__(self):
448
- return self
449
-
450
- def __next__(self):
451
- """
452
- Updates progress tracker with next item in iterable.
453
- """
454
- if self._callback:
455
- current_iterable = self.iterables[-1]
456
- while (
457
- not hasattr(current_iterable.iterable, "__next__")
458
- and len(self.iterables) > 0
459
- ):
460
- current_iterable = self.iterables.pop()
461
- self._callback(
462
- event_id=self._event_id,
463
- iterables=self.iterables,
464
- )
465
- assert current_iterable.index is not None, "Index not set."
466
- current_iterable.index += 1
467
- try:
468
- return next(current_iterable.iterable) # type: ignore
469
- except StopIteration:
470
- self.iterables.pop()
471
- raise
472
- else:
473
- return self
474
-
475
- def __call__(
476
- self,
477
- progress: float | tuple[int, int | None] | None,
478
- desc: str | None = None,
479
- total: int | None = None,
480
- unit: str = "steps",
481
- _tqdm=None,
482
- ):
483
- """
484
- Updates progress tracker with progress and message text.
485
- Parameters:
486
- progress: If float, should be between 0 and 1 representing completion. If Tuple, first number represents steps completed, and second value represents total steps or None if unknown. If None, hides progress bar.
487
- desc: description to display.
488
- total: estimated total number of steps.
489
- unit: unit of iterations.
490
- """
491
- if self._callback:
492
- if isinstance(progress, tuple):
493
- index, total = progress
494
- progress = None
495
- else:
496
- index = None
497
- self._callback(
498
- event_id=self._event_id,
499
- iterables=self.iterables
500
- + [TrackedIterable(None, index, total, desc, unit, _tqdm, progress)],
501
- )
502
- else:
503
- return progress
504
-
505
- def tqdm(
506
- self,
507
- iterable: Iterable | None,
508
- desc: str | None = None,
509
- total: int | None = None,
510
- unit: str = "steps",
511
- _tqdm=None,
512
- ):
513
- """
514
- Attaches progress tracker to iterable, like tqdm.
515
- Parameters:
516
- iterable: iterable to attach progress tracker to.
517
- desc: description to display.
518
- total: estimated total number of steps.
519
- unit: unit of iterations.
520
- """
521
- if self._callback:
522
- if iterable is None:
523
- new_iterable = TrackedIterable(None, 0, total, desc, unit, _tqdm)
524
- self.iterables.append(new_iterable)
525
- self._callback(event_id=self._event_id, iterables=self.iterables)
526
- return self
527
- length = len(iterable) if hasattr(iterable, "__len__") else None # type: ignore
528
- self.iterables.append(
529
- TrackedIterable(iter(iterable), 0, length, desc, unit, _tqdm)
530
- )
531
- return self
532
-
533
- def update(self, n=1):
534
- """
535
- Increases latest iterable with specified number of steps.
536
- Parameters:
537
- n: number of steps completed.
538
- """
539
- if self._callback and len(self.iterables) > 0:
540
- current_iterable = self.iterables[-1]
541
- assert current_iterable.index is not None, "Index not set."
542
- current_iterable.index += n
543
- self._callback(
544
- event_id=self._event_id,
545
- iterables=self.iterables,
546
- )
547
- else:
548
- return
549
-
550
- def close(self, _tqdm):
551
- """
552
- Removes iterable with given _tqdm.
553
- """
554
- if self._callback:
555
- for i in range(len(self.iterables)):
556
- if id(self.iterables[i]._tqdm) == id(_tqdm):
557
- self.iterables.pop(i)
558
- break
559
- self._callback(
560
- event_id=self._event_id,
561
- iterables=self.iterables,
562
- )
563
- else:
564
- return
565
-
566
-
567
- def create_tracker(root_blocks, event_id, fn, track_tqdm):
568
- progress = Progress(_callback=root_blocks._queue.set_progress, _event_id=event_id)
569
- if not track_tqdm:
570
- return progress, fn
571
-
572
- try:
573
- _tqdm = __import__("tqdm")
574
- except ModuleNotFoundError:
575
- return progress, fn
576
- if not hasattr(root_blocks, "_progress_tracker_per_thread"):
577
- root_blocks._progress_tracker_per_thread = {}
578
-
579
- def init_tqdm(
580
- self, iterable=None, desc=None, total=None, unit="steps", *args, **kwargs
581
- ):
582
- self._progress = root_blocks._progress_tracker_per_thread.get(
583
- threading.get_ident()
584
- )
585
- if self._progress is not None:
586
- self._progress.event_id = event_id
587
- self._progress.tqdm(iterable, desc, total, unit, _tqdm=self)
588
- kwargs["file"] = open(os.devnull, "w") # noqa: SIM115
589
- self.__init__orig__(iterable, desc, total, *args, unit=unit, **kwargs)
590
-
591
- def iter_tqdm(self):
592
- if self._progress is not None:
593
- return self._progress
594
- else:
595
- return self.__iter__orig__()
596
-
597
- def update_tqdm(self, n=1):
598
- if self._progress is not None:
599
- self._progress.update(n)
600
- return self.__update__orig__(n)
601
-
602
- def close_tqdm(self):
603
- if self._progress is not None:
604
- self._progress.close(self)
605
- return self.__close__orig__()
606
-
607
- def exit_tqdm(self, exc_type, exc_value, traceback):
608
- if self._progress is not None:
609
- self._progress.close(self)
610
- return self.__exit__orig__(exc_type, exc_value, traceback)
611
-
612
- if not hasattr(_tqdm.tqdm, "__init__orig__"):
613
- _tqdm.tqdm.__init__orig__ = _tqdm.tqdm.__init__
614
- _tqdm.tqdm.__init__ = init_tqdm
615
- if not hasattr(_tqdm.tqdm, "__update__orig__"):
616
- _tqdm.tqdm.__update__orig__ = _tqdm.tqdm.update
617
- _tqdm.tqdm.update = update_tqdm
618
- if not hasattr(_tqdm.tqdm, "__close__orig__"):
619
- _tqdm.tqdm.__close__orig__ = _tqdm.tqdm.close
620
- _tqdm.tqdm.close = close_tqdm
621
- if not hasattr(_tqdm.tqdm, "__exit__orig__"):
622
- _tqdm.tqdm.__exit__orig__ = _tqdm.tqdm.__exit__
623
- _tqdm.tqdm.__exit__ = exit_tqdm
624
- if not hasattr(_tqdm.tqdm, "__iter__orig__"):
625
- _tqdm.tqdm.__iter__orig__ = _tqdm.tqdm.__iter__
626
- _tqdm.tqdm.__iter__ = iter_tqdm
627
- if hasattr(_tqdm, "auto") and hasattr(_tqdm.auto, "tqdm"):
628
- _tqdm.auto.tqdm = _tqdm.tqdm
629
-
630
- def before_fn():
631
- thread_id = threading.get_ident()
632
- root_blocks._progress_tracker_per_thread[thread_id] = progress
633
-
634
- def after_fn():
635
- thread_id = threading.get_ident()
636
- del root_blocks._progress_tracker_per_thread[thread_id]
637
-
638
- tracked_fn = utils.function_wrapper(fn, before_fn=before_fn, after_fn=after_fn)
639
-
640
- return progress, tracked_fn
641
-
642
-
643
- def special_args(
644
- fn: Callable,
645
- inputs: list[Any] | None = None,
646
- request: routes.Request | None = None,
647
- event_data: EventData | None = None,
648
- ):
649
- """
650
- Checks if function has special arguments Request or EventData (via annotation) or Progress (via default value).
651
- If inputs is provided, these values will be loaded into the inputs array.
652
- Parameters:
653
- fn: function to check.
654
- inputs: array to load special arguments into.
655
- request: request to load into inputs.
656
- event_data: event-related data to load into inputs.
657
- Returns:
658
- updated inputs, progress index, event data index.
659
- """
660
- signature = inspect.signature(fn)
661
- type_hints = utils.get_type_hints(fn)
662
- positional_args = []
663
- for param in signature.parameters.values():
664
- if param.kind not in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD):
665
- break
666
- positional_args.append(param)
667
- progress_index = None
668
- event_data_index = None
669
- for i, param in enumerate(positional_args):
670
- type_hint = type_hints.get(param.name)
671
- if isinstance(param.default, Progress):
672
- progress_index = i
673
- if inputs is not None:
674
- inputs.insert(i, param.default)
675
- elif type_hint == routes.Request:
676
- if inputs is not None:
677
- inputs.insert(i, request)
678
- elif (
679
- type_hint
680
- and inspect.isclass(type_hint)
681
- and issubclass(type_hint, EventData)
682
- ):
683
- event_data_index = i
684
- if inputs is not None and event_data is not None:
685
- inputs.insert(i, type_hint(event_data.target, event_data._data))
686
- elif (
687
- param.default is not param.empty and inputs is not None and len(inputs) <= i
688
- ):
689
- inputs.insert(i, param.default)
690
- if inputs is not None:
691
- while len(inputs) < len(positional_args):
692
- i = len(inputs)
693
- param = positional_args[i]
694
- if param.default == param.empty:
695
- warnings.warn("Unexpected argument. Filling with None.")
696
- inputs.append(None)
697
- else:
698
- inputs.append(param.default)
699
- return inputs or [], progress_index, event_data_index
700
-
701
-
702
- @document()
703
- def update(**kwargs) -> dict:
704
- """
705
- Updates component properties. When a function passed into a Gradio Interface or a Blocks events returns a typical value, it updates the value of the output component. But it is also possible to update the properties of an output component (such as the number of lines of a `Textbox` or the visibility of an `Image`) by returning the component's `update()` function, which takes as parameters any of the constructor parameters for that component.
706
- This is a shorthand for using the update method on a component.
707
- For example, rather than using gr.Number.update(...) you can just use gr.update(...).
708
- Note that your editor's autocompletion will suggest proper parameters
709
- if you use the update method on the component.
710
- Demos: blocks_essay, blocks_update, blocks_essay_update
711
-
712
- Parameters:
713
- kwargs: Key-word arguments used to update the component's properties.
714
- Example:
715
- # Blocks Example
716
- import gradio as gr
717
- with gr.Blocks() as demo:
718
- radio = gr.Radio([1, 2, 4], label="Set the value of the number")
719
- number = gr.Number(value=2, interactive=True)
720
- radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)
721
- demo.launch()
722
-
723
- # Interface example
724
- import gradio as gr
725
- def change_textbox(choice):
726
- if choice == "short":
727
- return gr.Textbox.update(lines=2, visible=True)
728
- elif choice == "long":
729
- return gr.Textbox.update(lines=8, visible=True)
730
- else:
731
- return gr.Textbox.update(visible=False)
732
- gr.Interface(
733
- change_textbox,
734
- gr.Radio(
735
- ["short", "long", "none"], label="What kind of essay would you like to write?"
736
- ),
737
- gr.Textbox(lines=2),
738
- live=True,
739
- ).launch()
740
- """
741
- kwargs["__type__"] = "generic_update"
742
- return kwargs
743
-
744
-
745
- def skip() -> dict:
746
- return update()
747
-
748
-
749
- @document()
750
- def make_waveform(
751
- audio: str | tuple[int, np.ndarray],
752
- *,
753
- bg_color: str = "#f3f4f6",
754
- bg_image: str | None = None,
755
- fg_alpha: float = 0.75,
756
- bars_color: str | tuple[str, str] = ("#fbbf24", "#ea580c"),
757
- bar_count: int = 50,
758
- bar_width: float = 0.6,
759
- ) -> str:
760
- """
761
- Generates a waveform video from an audio file. Useful for creating an easy to share audio visualization. The output should be passed into a `gr.Video` component.
762
- Parameters:
763
- audio: Audio file path or tuple of (sample_rate, audio_data)
764
- bg_color: Background color of waveform (ignored if bg_image is provided)
765
- bg_image: Background image of waveform
766
- fg_alpha: Opacity of foreground waveform
767
- bars_color: Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) of gradient
768
- bar_count: Number of bars in waveform
769
- bar_width: Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.
770
- Returns:
771
- A filepath to the output video in mp4 format.
772
- """
773
- if isinstance(audio, str):
774
- audio_file = audio
775
- audio = processing_utils.audio_from_file(audio)
776
- else:
777
- tmp_wav = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
778
- processing_utils.audio_to_file(audio[0], audio[1], tmp_wav.name, format="wav")
779
- audio_file = tmp_wav.name
780
-
781
- if not os.path.isfile(audio_file):
782
- raise ValueError("Audio file not found.")
783
-
784
- ffmpeg = shutil.which("ffmpeg")
785
- if not ffmpeg:
786
- raise RuntimeError("ffmpeg not found.")
787
-
788
- duration = round(len(audio[1]) / audio[0], 4)
789
-
790
- # Helper methods to create waveform
791
- def hex_to_rgb(hex_str):
792
- return [int(hex_str[i : i + 2], 16) for i in range(1, 6, 2)]
793
-
794
- def get_color_gradient(c1, c2, n):
795
- assert n > 1
796
- c1_rgb = np.array(hex_to_rgb(c1)) / 255
797
- c2_rgb = np.array(hex_to_rgb(c2)) / 255
798
- mix_pcts = [x / (n - 1) for x in range(n)]
799
- rgb_colors = [((1 - mix) * c1_rgb + (mix * c2_rgb)) for mix in mix_pcts]
800
- return [
801
- "#" + "".join(f"{int(round(val * 255)):02x}" for val in item)
802
- for item in rgb_colors
803
- ]
804
-
805
- # Reshape audio to have a fixed number of bars
806
- samples = audio[1]
807
- if len(samples.shape) > 1:
808
- samples = np.mean(samples, 1)
809
- bins_to_pad = bar_count - (len(samples) % bar_count)
810
- samples = np.pad(samples, [(0, bins_to_pad)])
811
- samples = np.reshape(samples, (bar_count, -1))
812
- samples = np.abs(samples)
813
- samples = np.max(samples, 1)
814
-
815
- with utils.MatplotlibBackendMananger():
816
- plt.clf()
817
- # Plot waveform
818
- color = (
819
- bars_color
820
- if isinstance(bars_color, str)
821
- else get_color_gradient(bars_color[0], bars_color[1], bar_count)
822
- )
823
- plt.bar(
824
- np.arange(0, bar_count),
825
- samples * 2,
826
- bottom=(-1 * samples),
827
- width=bar_width,
828
- color=color,
829
- )
830
- plt.axis("off")
831
- plt.margins(x=0)
832
- tmp_img = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
833
- savefig_kwargs: dict[str, Any] = {"bbox_inches": "tight"}
834
- if bg_image is not None:
835
- savefig_kwargs["transparent"] = True
836
- else:
837
- savefig_kwargs["facecolor"] = bg_color
838
- plt.savefig(tmp_img.name, **savefig_kwargs)
839
- waveform_img = PIL.Image.open(tmp_img.name)
840
- waveform_img = waveform_img.resize((1000, 200))
841
-
842
- # Composite waveform with background image
843
- if bg_image is not None:
844
- waveform_array = np.array(waveform_img)
845
- waveform_array[:, :, 3] = waveform_array[:, :, 3] * fg_alpha
846
- waveform_img = PIL.Image.fromarray(waveform_array)
847
-
848
- bg_img = PIL.Image.open(bg_image)
849
- waveform_width, waveform_height = waveform_img.size
850
- bg_width, bg_height = bg_img.size
851
- if waveform_width != bg_width:
852
- bg_img = bg_img.resize(
853
- (waveform_width, 2 * int(bg_height * waveform_width / bg_width / 2))
854
- )
855
- bg_width, bg_height = bg_img.size
856
- composite_height = max(bg_height, waveform_height)
857
- composite = PIL.Image.new(
858
- "RGBA", (waveform_width, composite_height), "#FFFFFF"
859
- )
860
- composite.paste(bg_img, (0, composite_height - bg_height))
861
- composite.paste(
862
- waveform_img, (0, composite_height - waveform_height), waveform_img
863
- )
864
- composite.save(tmp_img.name)
865
- img_width, img_height = composite.size
866
- else:
867
- img_width, img_height = waveform_img.size
868
- waveform_img.save(tmp_img.name)
869
-
870
- # Convert waveform to video with ffmpeg
871
- output_mp4 = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
872
-
873
- ffmpeg_cmd = [
874
- ffmpeg,
875
- "-loop",
876
- "1",
877
- "-i",
878
- tmp_img.name,
879
- "-i",
880
- audio_file,
881
- "-vf",
882
- f"color=c=#FFFFFF77:s={img_width}x{img_height}[bar];[0][bar]overlay=-w+(w/{duration})*t:H-h:shortest=1",
883
- "-t",
884
- str(duration),
885
- "-y",
886
- output_mp4.name,
887
- ]
888
-
889
- subprocess.check_call(ffmpeg_cmd)
890
- return output_mp4.name
891
-
892
-
893
- @document()
894
- class EventData:
895
- """
896
- When a subclass of EventData is added as a type hint to an argument of an event listener method, this object will be passed as that argument.
897
- It contains information about the event that triggered the listener, such the target object, and other data related to the specific event that are attributes of the subclass.
898
-
899
- Example:
900
- table = gr.Dataframe([[1, 2, 3], [4, 5, 6]])
901
- gallery = gr.Gallery([("cat.jpg", "Cat"), ("dog.jpg", "Dog")])
902
- textbox = gr.Textbox("Hello World!")
903
-
904
- statement = gr.Textbox()
905
-
906
- def on_select(evt: gr.SelectData): # SelectData is a subclass of EventData
907
- return f"You selected {evt.value} at {evt.index} from {evt.target}"
908
-
909
- table.select(on_select, None, statement)
910
- gallery.select(on_select, None, statement)
911
- textbox.select(on_select, None, statement)
912
- Demos: gallery_selections, tictactoe
913
- """
914
-
915
- def __init__(self, target: Block | None, _data: Any):
916
- """
917
- Parameters:
918
- target: The target object that triggered the event. Can be used to distinguish if multiple components are bound to the same listener.
919
- """
920
- self.target = target
921
- self._data = _data
922
-
923
-
924
- def log_message(message: str, level: Literal["info", "warning"] = "info"):
925
- from gradio import context
926
-
927
- if not hasattr(context.thread_data, "blocks"): # Function called outside of Gradio
928
- if level == "info":
929
- print(message)
930
- elif level == "warning":
931
- warnings.warn(message)
932
- return
933
- if not context.thread_data.blocks.enable_queue:
934
- warnings.warn(
935
- f"Queueing must be enabled to issue {level.capitalize()}: '{message}'."
936
- )
937
- return
938
- context.thread_data.blocks._queue.log_message(
939
- event_id=context.thread_data.event_id, log=message, level=level
940
- )
941
-
942
-
943
- @document()
944
- def Warning(message: str = "Warning issued."): # noqa: N802
945
- """
946
- This function allows you to pass custom warning messages to the user. You can do so simply with `gr.Warning('message here')`, and when that line is executed the custom message will appear in a modal on the demo.
947
- Parameters:
948
- message: The warning message to be displayed to the user.
949
- """
950
- log_message(message, level="warning")
951
-
952
-
953
- @document()
954
- def Info(message: str = "Info issued."): # noqa: N802
955
- """
956
- Parameters:
957
- message: The info message to be displayed to the user.
958
- """
959
- log_message(message, level="info")
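
A quick illustration of the bar-reduction step in `make_waveform` above: the samples are padded up to a multiple of `bar_count`, reshaped, and the per-bar peak of the absolute amplitude becomes the bar height. This is a minimal sketch with a synthetic signal, not code from the deleted file.

# Sketch: how make_waveform collapses raw samples into bar heights.
import numpy as np

samples = np.sin(np.linspace(0, 20 * np.pi, 1003))    # synthetic mono audio (illustrative)
bar_count = 50
bins_to_pad = bar_count - (len(samples) % bar_count)  # 1003 -> pad 47 zeros
padded = np.pad(samples, [(0, bins_to_pad)])          # length 1050, divisible by 50
bars = np.abs(padded.reshape(bar_count, -1)).max(axis=1)
print(bars.shape)  # (50,) -- one peak amplitude per plotted bar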
 
spaces/DragGan/DragGan-Inversion/stylegan_human/edit.py DELETED
@@ -1,207 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- from edit.edit_helper import conv_warper, decoder, encoder_ifg, encoder_ss, encoder_sefa
4
- import legacy
5
- import subprocess
6
- from typing import List, Optional
7
- import cv2
8
- import click
9
- from torch_utils.models import Generator
10
- import os
11
- import sys
12
- import torch
13
- import numpy as np
14
- sys.path.append(".")
15
-
16
-
17
- """
18
- Edit generated images with different SOTA methods.
19
- Notes:
20
- 1. We provide some latent directions in the folder, you can play around with them.
21
- 2. ''upper_length'' and ''bottom_length'' of ''attr_name'' are available for demo.
22
- 3. Layers to control and editing strength are set in edit/edit_config.py.
23
-
24
- Examples:
25
-
26
- \b
27
- # Editing with InterfaceGAN, StyleSpace, and Sefa
28
- python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
29
- --seeds 61531,61570,61571,61610 --outdir outputs/edit_results
30
-
31
-
32
- # Editing using inverted latent code
33
- python edit.py ---network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
34
- --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
35
-
36
- """
37
-
38
-
39
- @click.command()
40
- @click.pass_context
41
- @click.option('--network', 'ckpt_path', help='Network pickle filename', required=True)
42
- @click.option('--attr_name', help='choose one of the attr: upper_length or bottom_length', type=str, required=True)
43
- @click.option('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True)
44
- @click.option('--gen_video', type=bool, default=True, help='If want to generate video')
45
- @click.option('--combine', type=bool, default=True, help='If want to combine different editing results in the same frame')
46
- @click.option('--seeds', type=legacy.num_range, help='List of random seeds')
47
- @click.option('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR')
48
- @click.option('--real', type=bool, help='True for editing real image', default=False)
49
- @click.option('--real_w_path', help='Path of latent code for real image')
50
- @click.option('--real_img_path', help='Path of real image, this just concat real image with inverted and edited results together')
51
- def main(
52
- ctx: click.Context,
53
- ckpt_path: str,
54
- attr_name: str,
55
- truncation: float,
56
- gen_video: bool,
57
- combine: bool,
58
- seeds: Optional[List[int]],
59
- outdir: str,
60
- real: str,
61
- real_w_path: str,
62
- real_img_path: str
63
- ):
64
- # convert pkl to pth
65
- # if not os.path.exists(ckpt_path.replace('.pkl','.pth')):
66
- legacy.convert(ckpt_path, ckpt_path.replace('.pkl', '.pth'), G_only=real)
67
- ckpt_path = ckpt_path.replace('.pkl', '.pth')
68
- print("start...", flush=True)
69
- config = {"latent": 512, "n_mlp": 8, "channel_multiplier": 2}
70
- generator = Generator(
71
- size=1024,
72
- style_dim=config["latent"],
73
- n_mlp=config["n_mlp"],
74
- channel_multiplier=config["channel_multiplier"]
75
- )
76
-
77
- generator.load_state_dict(torch.load(ckpt_path)['g_ema'])
78
- generator.eval().cuda()
79
-
80
- with torch.no_grad():
81
- mean_path = os.path.join('edit', 'mean_latent.pkl')
82
- if not os.path.exists(mean_path):
83
- mean_n = 3000
84
- mean_latent = generator.mean_latent(mean_n).detach()
85
- legacy.save_obj(mean_latent, mean_path)
86
- else:
87
- mean_latent = legacy.load_pkl(mean_path).cuda()
88
- finals = []
89
-
90
- ## -- selected sample seeds -- ##
91
- # seeds = [60948,60965,61174,61210,61511,61598,61610] #bottom -> long
92
- # [60941,61064,61103,61313,61531,61570,61571] # bottom -> short
93
- # [60941,60965,61064,61103,6117461210,61531,61570,61571,61610] # upper --> long
94
- # [60948,61313,61511,61598] # upper --> short
95
- if real:
96
- seeds = [0]
97
-
98
- for t in seeds:
99
- if real: # now assume process single real image only
100
- if real_img_path:
101
- real_image = cv2.imread(real_img_path)
102
- real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB)
103
- import torchvision.transforms as transforms
104
- transform = transforms.Compose( # normalize to (-1, 1)
105
- [transforms.ToTensor(),
106
- transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5))]
107
- )
108
- real_image = transform(real_image).unsqueeze(0).cuda()
109
-
110
- test_input = torch.load(real_w_path)
111
- output, _ = generator(
112
- test_input, False, truncation=1, input_is_latent=True, real=True)
113
-
114
- else: # generate image from random seeds
115
- test_input = torch.from_numpy(np.random.RandomState(
116
- t).randn(1, 512)).float().cuda() # torch.Size([1, 512])
117
- output, _ = generator(
118
- [test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real)
119
-
120
- # interfacegan
121
- style_space, latent, noise = encoder_ifg(
122
- generator, test_input, attr_name, truncation, mean_latent, real=real)
123
- image1 = decoder(generator, style_space, latent, noise)
124
- # stylespace
125
- style_space, latent, noise = encoder_ss(
126
- generator, test_input, attr_name, truncation, mean_latent, real=real)
127
- image2 = decoder(generator, style_space, latent, noise)
128
- # sefa
129
- latent, noise = encoder_sefa(
130
- generator, test_input, attr_name, truncation, mean_latent, real=real)
131
- image3, _ = generator([latent], noise=noise, input_is_latent=True)
132
- if real_img_path:
133
- final = torch.cat(
134
- (real_image, output, image1, image2, image3), 3)
135
- else:
136
- final = torch.cat((output, image1, image2, image3), 3)
137
-
138
- # legacy.visual(output, f'{outdir}/{attr_name}_{t:05d}_raw.jpg')
139
- # legacy.visual(image1, f'{outdir}/{attr_name}_{t:05d}_ifg.jpg')
140
- # legacy.visual(image2, f'{outdir}/{attr_name}_{t:05d}_ss.jpg')
141
- # legacy.visual(image3, f'{outdir}/{attr_name}_{t:05d}_sefa.jpg')
142
-
143
- if gen_video:
144
- total_step = 90
145
- if real:
146
- video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[-2]}/"
147
- video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/"
148
- video_sefa_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/"
149
- else:
150
- video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{t:05d}/"
151
- video_ss_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/"
152
- video_sefa_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/"
153
- video_comb_path = f"{outdir}/video/tmp"
154
-
155
- if combine:
156
- if not os.path.exists(video_comb_path):
157
- os.makedirs(video_comb_path)
158
- else:
159
- if not os.path.exists(video_ifg_path):
160
- os.makedirs(video_ifg_path)
161
- if not os.path.exists(video_ss_path):
162
- os.makedirs(video_ss_path)
163
- if not os.path.exists(video_sefa_path):
164
- os.makedirs(video_sefa_path)
165
- for i in range(total_step):
166
- style_space, latent, noise = encoder_ifg(
167
- generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
168
- image1 = decoder(generator, style_space, latent, noise)
169
- style_space, latent, noise = encoder_ss(
170
- generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
171
- image2 = decoder(generator, style_space, latent, noise)
172
- latent, noise = encoder_sefa(
173
- generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
174
- image3, _ = generator(
175
- [latent], noise=noise, input_is_latent=True)
176
- if combine:
177
- if real_img_path:
178
- comb_img = torch.cat(
179
- (real_image, output, image1, image2, image3), 3)
180
- else:
181
- comb_img = torch.cat(
182
- (output, image1, image2, image3), 3)
183
- legacy.visual(comb_img, os.path.join(
184
- video_comb_path, f'{i:05d}.jpg'))
185
- else:
186
- legacy.visual(image1, os.path.join(
187
- video_ifg_path, f'{i:05d}.jpg'))
188
- legacy.visual(image2, os.path.join(
189
- video_ss_path, f'{i:05d}.jpg'))
190
- if combine:
191
- cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path.replace('ifg_', '')[:-1] + '.mp4'}"
192
- subprocess.call(cmd, shell=True)
193
- else:
194
- cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path[:-1] + '.mp4'}"
195
- subprocess.call(cmd, shell=True)
196
- cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ss_path[:-1] + '.mp4'}"
197
- subprocess.call(cmd, shell=True)
198
-
199
- # interfacegan, stylespace, sefa
200
- finals.append(final)
201
-
202
- final = torch.cat(finals, 2)
203
- legacy.visual(final, os.path.join(outdir, 'final.jpg'))
204
-
205
-
206
- if __name__ == "__main__":
207
- main()
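
For reference, a minimal sketch of how the script derives a w-space input from a seed when `--seeds` is given: the same seed always produces the same 512-dim latent, so edits are reproducible. (CPU-only here for illustration; the original moves the tensor to CUDA.)

# Sketch: deterministic latent from a seed, as used for the non-real-image path.
import numpy as np
import torch

seed = 61531  # one of the sample seeds listed in the script
latent = torch.from_numpy(np.random.RandomState(seed).randn(1, 512)).float()
print(latent.shape)  # torch.Size([1, 512])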
 
spaces/Dragonnext/Unicorn-proxy/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Unicorn OAI Proxy
- emoji: 🦄
- sdk: docker
- colorFrom: gray
- colorTo: gray
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Eddycrack864/Applio-Inference/demucs/test.py DELETED
@@ -1,109 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import gzip
- import sys
- from concurrent import futures
-
- import musdb
- import museval
- import torch as th
- import tqdm
- from scipy.io import wavfile
- from torch import distributed
-
- from .audio import convert_audio
- from .utils import apply_model
-
-
- def evaluate(model,
-              musdb_path,
-              eval_folder,
-              workers=2,
-              device="cpu",
-              rank=0,
-              save=False,
-              shifts=0,
-              split=False,
-              overlap=0.25,
-              is_wav=False,
-              world_size=1):
-     """
-     Evaluate model using museval. Run the model
-     on a single GPU, the bottleneck being the call to museval.
-     """
-
-     output_dir = eval_folder / "results"
-     output_dir.mkdir(exist_ok=True, parents=True)
-     json_folder = eval_folder / "results/test"
-     json_folder.mkdir(exist_ok=True, parents=True)
-
-     # we load tracks from the original musdb set
-     test_set = musdb.DB(musdb_path, subsets=["test"], is_wav=is_wav)
-     src_rate = 44100  # hardcoded for now...
-
-     for p in model.parameters():
-         p.requires_grad = False
-         p.grad = None
-
-     pendings = []
-     with futures.ProcessPoolExecutor(workers or 1) as pool:
-         for index in tqdm.tqdm(range(rank, len(test_set), world_size), file=sys.stdout):
-             track = test_set.tracks[index]
-
-             out = json_folder / f"{track.name}.json.gz"
-             if out.exists():
-                 continue
-
-             mix = th.from_numpy(track.audio).t().float()
-             ref = mix.mean(dim=0)  # mono mixture
-             mix = (mix - ref.mean()) / ref.std()
-             mix = convert_audio(mix, src_rate, model.samplerate, model.audio_channels)
-             estimates = apply_model(model, mix.to(device),
-                                     shifts=shifts, split=split, overlap=overlap)
-             estimates = estimates * ref.std() + ref.mean()
-
-             estimates = estimates.transpose(1, 2)
-             references = th.stack(
-                 [th.from_numpy(track.targets[name].audio).t() for name in model.sources])
-             references = convert_audio(references, src_rate,
-                                        model.samplerate, model.audio_channels)
-             references = references.transpose(1, 2).numpy()
-             estimates = estimates.cpu().numpy()
-             win = int(1. * model.samplerate)
-             hop = int(1. * model.samplerate)
-             if save:
-                 folder = eval_folder / "wav/test" / track.name
-                 folder.mkdir(exist_ok=True, parents=True)
-                 for name, estimate in zip(model.sources, estimates):
-                     wavfile.write(str(folder / (name + ".wav")), 44100, estimate)
-
-             if workers:
-                 pendings.append((track.name, pool.submit(
-                     museval.evaluate, references, estimates, win=win, hop=hop)))
-             else:
-                 pendings.append((track.name, museval.evaluate(
-                     references, estimates, win=win, hop=hop)))
-             del references, mix, estimates, track
-
-         for track_name, pending in tqdm.tqdm(pendings, file=sys.stdout):
-             if workers:
-                 pending = pending.result()
-             sdr, isr, sir, sar = pending
-             track_store = museval.TrackStore(win=44100, hop=44100, track_name=track_name)
-             for idx, target in enumerate(model.sources):
-                 values = {
-                     "SDR": sdr[idx].tolist(),
-                     "SIR": sir[idx].tolist(),
-                     "ISR": isr[idx].tolist(),
-                     "SAR": sar[idx].tolist()
-                 }
-
-                 track_store.add_target(target_name=target, values=values)
-             json_path = json_folder / f"{track_name}.json.gz"
-             gzip.open(json_path, "w").write(track_store.json.encode('utf-8'))
-     if world_size > 1:
-         distributed.barrier()
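
A small sketch of the normalization round-trip in `evaluate`: the mixture is standardized with the mono reference's statistics before `apply_model`, and the estimates are rescaled afterwards, so the scaling is exactly undone. The tensors below are random stand-ins, not real tracks.

# Sketch: standardize with the mono reference, then invert the scaling.
import torch as th

mix = th.randn(2, 44100)           # stand-in stereo mixture (channels, samples)
ref = mix.mean(dim=0)              # mono reference
normalized = (mix - ref.mean()) / ref.std()
estimates = normalized.clone()     # pretend model output at the normalized scale
restored = estimates * ref.std() + ref.mean()
print(th.allclose(restored, mix))  # True -- the transform is invertible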
 
spaces/EronSamez/RVC_HFmeu/diffq/uniform.py DELETED
@@ -1,121 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- """
- Classic uniform quantization over n bits.
- """
- from typing import Tuple
- import torch
-
- from .base import BaseQuantizer
- from .utils import simple_repr
-
-
- def uniform_quantize(p: torch.Tensor, bits: torch.Tensor = torch.tensor(8.)):
-     """
-     Quantize the given weights over `bits` bits.
-
-     Returns:
-         - quantized levels
-         - (min, max) range.
-
-     """
-     assert (bits >= 1).all() and (bits <= 15).all()
-     num_levels = (2 ** bits.float()).long()
-     mn = p.min().item()
-     mx = p.max().item()
-     p = (p - mn) / (mx - mn)  # put p in [0, 1]
-     unit = 1 / (num_levels - 1)  # quantization unit
-     levels = (p / unit).round()
-     if (bits <= 8).all():
-         levels = levels.byte()
-     else:
-         levels = levels.short()
-     return levels, (mn, mx)
-
-
- def uniform_unquantize(levels: torch.Tensor, scales: Tuple[float, float],
-                        bits: torch.Tensor = torch.tensor(8.)):
-     """
-     Unquantize the weights from the levels and scale. Return a float32 tensor.
-     """
-     mn, mx = scales
-     num_levels = 2 ** bits.float()
-     unit = 1 / (num_levels - 1)
-     levels = levels.float()
-     p = levels * unit  # in [0, 1]
-     return p * (mx - mn) + mn
-
-
- class UniformQuantizer(BaseQuantizer):
-     def __init__(self, model: torch.nn.Module, bits: float = 8., min_size: float = 0.01,
-                  float16: bool = False, qat: bool = False, exclude=[], detect_bound=True):
-         """
-         Args:
-             model (torch.nn.Module): model to quantize
-             bits (float): number of bits to quantize over.
-             min_size (float): minimum size in MB of a parameter to be quantized.
-             float16 (bool): if a layer is smaller than min_size, should we still do float16?
-             qat (bool): perform quantized aware training.
-             exclude (list[str]): list of patterns used to match parameters to exclude.
-                 For instance `['bias']` to exclude all bias terms.
-             detect_bound (bool): if True, will detect bound parameters and reuse
-                 the same quantized tensor for both.
-         """
-         self.bits = float(bits)
-         self.qat = qat
-
-         super().__init__(model, min_size, float16, exclude, detect_bound)
-
-     def __repr__(self):
-         return simple_repr(self, )
-
-     def _pre_forward_train(self):
-         if self.qat:
-             for qparam in self._qparams:
-                 if qparam.other is not None:
-                     new_param = qparam.other.module._parameters[qparam.other.name]
-                 else:
-                     quantized = self._quantize_param(qparam)
-                     qvalue = self._unquantize_param(qparam, quantized)
-                     new_param = qparam.param + (qvalue - qparam.param).detach()
-                 qparam.module._parameters[qparam.name] = new_param
-             return True
-         return False
-
-     def _post_forward_train(self):
-         if self.qat:
-             for qparam in self._qparams:
-                 qparam.module._parameters[qparam.name] = qparam.param
-             return True
-         return False
-
-     def _quantize_param(self, qparam):
-         levels, scales = uniform_quantize(qparam.param.data, torch.tensor(self.bits))
-         return (levels, scales)
-
-     def _unquantize_param(self, qparam, quantized):
-         levels, scales = quantized
-         return uniform_unquantize(levels, scales, torch.tensor(self.bits))
-
-     def model_size(self):
-         """
-         Non differentiable model size in MB.
-         """
-         total = super().model_size()
-         subtotal = 0
-         for qparam in self._qparams:
-             if qparam.other is None:  # if parameter is bound, count only one copy.
-                 subtotal += self.bits * qparam.param.numel() + 64  # 2 float for the overall scales
-         subtotal /= 2**20 * 8  # bits to MegaBytes
-         return total + subtotal
-
-     def true_model_size(self):
-         """
-         Return the true quantized model size, in MB, without extra
-         compression.
-         """
-         return self.model_size().item()
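
As a standalone illustration of the scheme implemented by `uniform_quantize` / `uniform_unquantize`: a tensor is mapped onto 2**bits evenly spaced levels over its [min, max] range, and the reconstruction error is bounded by half a quantization step. The snippet below re-derives the math with plain torch rather than importing the module, so it is a sketch of the idea, not the library API.

# Sketch: quantize to integer levels over [min, max], dequantize, check the error bound.
import torch

p = torch.randn(1000)
bits = 8
num_levels = 2 ** bits
mn, mx = p.min().item(), p.max().item()
unit = 1 / (num_levels - 1)
levels = ((p - mn) / (mx - mn) / unit).round()   # integer code per weight
restored = levels * unit * (mx - mn) + mn        # dequantized float32
print((p - restored).abs().max() <= unit * (mx - mn) / 2 + 1e-6)  # tensor(True)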
 
spaces/EuroPython2022/latr-vqa/app.py DELETED
@@ -1,148 +0,0 @@
1
- # Requirements.txt
2
- from torch import cuda
3
- from transformers import T5Tokenizer, T5ForConditionalGeneration
4
- import gradio as gr
5
- from utils import convert_ans_to_token, convert_ques_to_token, rotate, convert_token_to_ques, convert_token_to_answer
6
- from modeling import LaTr_for_pretraining, LaTr_for_finetuning, LaTrForVQA
7
- from dataset import load_json_file, get_specific_file, resize_align_bbox, get_tokens_with_boxes, create_features
8
- import torch.nn as nn
9
- from PIL import Image, ImageDraw
10
- import pytesseract
11
- from tqdm.auto import tqdm
12
- import numpy as np
13
- import json
14
- import os
15
- import torch
16
- from torchvision import transforms
17
-
18
-
19
- # install PyTesseract
20
- os.system('pip install -q pytesseract')
21
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
22
-
23
-
24
- # Default Library import
25
- # Visualization libraries
26
-
27
- # Specific libraries of LaTr
28
-
29
- # Setting the hyperparameters as well as primary configurations
30
-
31
- PAD_TOKEN_BOX = [0, 0, 0, 0]
32
- max_seq_len = 512
33
- batch_size = 2
34
- target_size = (500, 384)
35
- t5_model = "t5-base"
36
-
37
-
38
- device = 'cuda' if cuda.is_available() else 'cpu'
39
-
40
-
41
- # Configuration for the model
42
- config = {
43
- 't5_model': 't5-base',
44
- 'vocab_size': 32128,
45
- 'hidden_state': 768,
46
- 'max_2d_position_embeddings': 1001,
47
- 'classes': 32128, # number of tokens
48
- 'seq_len': 512
49
- }
50
-
51
- tokenizer = T5Tokenizer.from_pretrained(t5_model)
52
- latr = LaTrForVQA(config)
53
- url = 'https://www.kaggleusercontent.com/kf/99663112/eyJhbGciOiJkaXIiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0..5-IY5sqV-Y5lb7On3LOjMg._mvffzQwAyb-JSgwqhyxcjz3clhuAIwZEep4DA0CEao2LVjijahLYK9Co6yYVbdaEVk8CVqIGCx-_08XSdcsYnkt4HzCxI6zCI6Rv9_PhHITzTCZPC4juNgsmbb3ebu2eu5kJxUGsQvikk6efkpNoXFhPS5XV-Pqx_9wfxDyRJCJ1hzSxtiZcnsobKfoQt6F2w09NWGT45ePd_UlQNloogUD6icJDSWvyLvXHaVryKPGhy3q0_yaVheoBqflipUcUb1Q7q8wRDYbA3Kg_pAJzuyfPGhEp1WUEVt9gMXO1IIUCQbiygZRdGpKZBJwDx2LylLD3NwKMqv_maUknV0pCRhES45pFpuXv0X8ITGcr8DtGeLBIa9ZHW-eUEXETZnFdJqj6lU32IEyjJBhx1nNC_w6-0AGgH9ZC2c54sxUtmfOHmB9AhjYAmXi7Nmr2mQpDTBgrlPCQmNFLJ8GPWP0G6cDAgvZryVyFUm2z7SEcUzzLH6jHyr48ggGJBikNxZ4WL3W7L-zx_6v8BQBxBUp2KcZFzrfaXO1uoY2EyD3Y4ynTEUuEncS-UdRczCZCz6PqViyHJLycMnQteTw0j0ivEsLOlJkADufPX11f8ScVadd1YU-824nD6D5Kc16DRy0z1fHl1ZouI6Ahp3wY3AT-CR5te9kvYJUn_ggjvsm4d8CYc1qI6i1lfrNeeBxXCaK.dhOQv7UopiggmdGfsp-xmQ/models/epoch=0-step=34602.ckpt'
54
-
55
-
56
-
57
- try:
58
- latr = latr.load_from_checkpoint(url)
59
- print("Checkpoint loaded successfully")
60
- except:
61
- print("Checkpoint not loaded")
62
- pass
63
-
64
-
65
- image = gr.inputs.Image(type="pil")
66
- question = gr.inputs.Textbox(label="Question")
67
- answer = gr.outputs.Textbox(label="Predicted answer")
68
- examples = [["remote.jpg", "what number is the button near the top left?"]]
69
-
70
-
71
- from transformers import ViTFeatureExtractor, ViTModel
72
- vit_feat_extract = ViTFeatureExtractor("google/vit-base-patch16-224-in21k")
73
-
74
- import torchvision
75
- import numpy as np
76
-
77
- def answer_question(image, question):
78
-
79
- # Extracting features from the image
80
- image.save("sample.png")
81
- img, boxes, tokenized_words = create_features("sample.png",
82
- tokenizer=tokenizer,
83
- target_size=target_size,
84
- max_seq_length=max_seq_len,
85
- use_ocr=True
86
- )
87
-
88
- ## Converting the boxes as per the format required for model input
89
- boxes = torch.as_tensor(boxes, dtype=torch.int32)
90
- width = (boxes[:, 2] - boxes[:, 0]).view(-1, 1)
91
- height = (boxes[:, 3] - boxes[:, 1]).view(-1, 1)
92
- boxes = torch.cat([boxes, width, height], axis = -1)
93
-
94
- ## Clamping the value,as some of the box values are out of bound
95
- boxes[:, 0] = torch.clamp(boxes[:, 0], min = 0, max = 0)
96
- boxes[:, 2] = torch.clamp(boxes[:, 2], min = 1000, max = 1000)
97
- boxes[:, 4] = torch.clamp(boxes[:, 4], min = 1000, max = 1000)
98
-
99
- boxes[:, 1] = torch.clamp(boxes[:, 1], min = 0, max = 0)
100
- boxes[:, 3] = torch.clamp(boxes[:, 3], min = 1000, max = 1000)
101
- boxes[:, 5] = torch.clamp(boxes[:, 5], min = 1000, max = 1000)
102
-
103
- ## Tensor tokenized words
104
- tokenized_words = torch.as_tensor(tokenized_words, dtype=torch.int32)
105
- img = np.array(img)
106
- img = torchvision.transforms.ToTensor()(img)
107
- question = convert_ques_to_token(question = question, tokenizer = tokenizer)
108
-
109
- ## Expanding the dimension for inference
110
- boxes = boxes.unsqueeze(0)
111
- tokenized_words = tokenized_words.unsqueeze(0)
112
- question = question.unsqueeze(0)
113
-
114
- # print("Shape of Image is:", img.shape)
115
- img = vit_feat_extract(img, return_tensors = 'pt')['pixel_values']
116
- if int(len(img.shape)) == 3:
117
- img = img.unsqueeze(0)
118
-
119
- encoding = {'img': img, 'boxes': boxes, 'tokenized_words': tokenized_words, 'question': question}
120
-
121
- with torch.no_grad():
122
- logits = latr.forward(encoding)
123
- logits = logits.squeeze(0)
124
-
125
- _, preds = torch.max(logits, dim = 1)
126
- preds = preds.detach().cpu()
127
- mask = torch.clamp(preds, min = 0, max = 1)
128
- last_non_zero_argument = (mask != 0).nonzero()[1][-1]
129
-
130
- predicted_ans = convert_token_to_ques(preds[:last_non_zero_argument], tokenizer)
131
- return predicted_ans
132
-
133
-
134
- # Taken from here: https://huggingface.co/spaces/nielsr/vilt-vqa/blob/main/app.py
135
- title = "Interactive demo: LaTr (Layout Aware Transformer) for VQA"
136
- description = "Gradio Demo for LaTr (Layout Aware Transformer),trained on TextVQA Dataset. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
137
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.12494' target='_blank'>LaTr: Layout-aware transformer for scene-text VQA,a novel multimodal architecture for Scene Text Visual Question Answering (STVQA)</a> | <a href='https://github.com/uakarsh/latr' target='_blank'>Github Repo</a></p>"
138
- examples = [['remote.png', "Is remote present in the picture?"]]
139
-
140
- interface = gr.Interface(fn=answer_question,
141
- inputs=[image, question],
142
- outputs=answer,
143
- examples=examples,
144
- title=title,
145
- description=description,
146
- article=article,
147
- enable_queue=True)
148
- interface.launch(debug=True)
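
A brief sketch of the bounding-box layout that `answer_question` builds before the clamping step: each OCR box [x1, y1, x2, y2] gets its width and height appended as two extra columns. The coordinates below are made up for illustration.

# Sketch: extend [x1, y1, x2, y2] boxes with width and height columns.
import torch

boxes = torch.tensor([[10, 20, 110, 70], [0, 0, 50, 25]], dtype=torch.int32)
width = (boxes[:, 2] - boxes[:, 0]).view(-1, 1)
height = (boxes[:, 3] - boxes[:, 1]).view(-1, 1)
boxes6 = torch.cat([boxes, width, height], dim=-1)
print(boxes6)  # rows of [x1, y1, x2, y2, w, h]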
 
spaces/Faridmaruf/rvc-genshin-v2/lib/infer_pack/onnx_inference.py DELETED
@@ -1,145 +0,0 @@
- import onnxruntime
- import librosa
- import numpy as np
- import soundfile
-
-
- class ContentVec:
-     def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
-         print("load model(s) from {}".format(vec_path))
-         if device == "cpu" or device is None:
-             providers = ["CPUExecutionProvider"]
-         elif device == "cuda":
-             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-         elif device == "dml":
-             providers = ["DmlExecutionProvider"]
-         else:
-             raise RuntimeError("Unsupported Device")
-         self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
-     def __call__(self, wav):
-         return self.forward(wav)
-
-     def forward(self, wav):
-         feats = wav
-         if feats.ndim == 2:  # double channels
-             feats = feats.mean(-1)
-         assert feats.ndim == 1, feats.ndim
-         feats = np.expand_dims(np.expand_dims(feats, 0), 0)
-         onnx_input = {self.model.get_inputs()[0].name: feats}
-         logits = self.model.run(None, onnx_input)[0]
-         return logits.transpose(0, 2, 1)
-
-
- def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
-     if f0_predictor == "pm":
-         from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
-         f0_predictor_object = PMF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     elif f0_predictor == "harvest":
-         from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
-             HarvestF0Predictor,
-         )
-
-         f0_predictor_object = HarvestF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     elif f0_predictor == "dio":
-         from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
-         f0_predictor_object = DioF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     else:
-         raise Exception("Unknown f0 predictor")
-     return f0_predictor_object
-
-
- class OnnxRVC:
-     def __init__(
-         self,
-         model_path,
-         sr=40000,
-         hop_size=512,
-         vec_path="vec-768-layer-12",
-         device="cpu",
-     ):
-         vec_path = f"pretrained/{vec_path}.onnx"
-         self.vec_model = ContentVec(vec_path, device)
-         if device == "cpu" or device is None:
-             providers = ["CPUExecutionProvider"]
-         elif device == "cuda":
-             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-         elif device == "dml":
-             providers = ["DmlExecutionProvider"]
-         else:
-             raise RuntimeError("Unsupported Device")
-         self.model = onnxruntime.InferenceSession(model_path, providers=providers)
-         self.sampling_rate = sr
-         self.hop_size = hop_size
-
-     def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
-         onnx_input = {
-             self.model.get_inputs()[0].name: hubert,
-             self.model.get_inputs()[1].name: hubert_length,
-             self.model.get_inputs()[2].name: pitch,
-             self.model.get_inputs()[3].name: pitchf,
-             self.model.get_inputs()[4].name: ds,
-             self.model.get_inputs()[5].name: rnd,
-         }
-         return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
-     def inference(
-         self,
-         raw_path,
-         sid,
-         f0_method="dio",
-         f0_up_key=0,
-         pad_time=0.5,
-         cr_threshold=0.02,
-     ):
-         f0_min = 50
-         f0_max = 1100
-         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-         f0_predictor = get_f0_predictor(
-             f0_method,
-             hop_length=self.hop_size,
-             sampling_rate=self.sampling_rate,
-             threshold=cr_threshold,
-         )
-         wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
-         org_length = len(wav)
-         if org_length / sr > 50.0:
-             raise RuntimeError("Reached Max Length")
-
-         wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
-         wav16k = wav16k
-
-         hubert = self.vec_model(wav16k)
-         hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
-         hubert_length = hubert.shape[1]
-
-         pitchf = f0_predictor.compute_f0(wav, hubert_length)
-         pitchf = pitchf * 2 ** (f0_up_key / 12)
-         pitch = pitchf.copy()
-         f0_mel = 1127 * np.log(1 + pitch / 700)
-         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-             f0_mel_max - f0_mel_min
-         ) + 1
-         f0_mel[f0_mel <= 1] = 1
-         f0_mel[f0_mel > 255] = 255
-         pitch = np.rint(f0_mel).astype(np.int64)
-
-         pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
-         pitch = pitch.reshape(1, len(pitch))
-         ds = np.array([sid]).astype(np.int64)
-
-         rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
-         hubert_length = np.array([hubert_length]).astype(np.int64)
-
-         out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
-         out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
-         return out_wav[0:org_length]
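
To make the pitch handling in `inference` concrete, here is the same Hz-to-coarse-pitch mapping applied to a few hand-picked values: unvoiced frames (f0 = 0) collapse to code 1 and the configured ceiling maps to 255. The example array is illustrative only.

# Sketch: map an f0 contour in Hz to 1..255 coarse pitch codes via the mel-style curve.
import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 100.0, 440.0, 1100.0])  # 0 marks unvoiced frames
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
    f0_mel_max - f0_mel_min
) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
print(np.rint(f0_mel).astype(np.int64))  # first entry is 1, last entry is 255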
 
spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/waifu-tips.js DELETED
@@ -1,405 +0,0 @@
- window.live2d_settings = Array(); /*
-
- く__,.ヘヽ.    / ,ー、 〉
-      \ ', !-─‐-i / /´
-       /`ー'    L//`ヽ、 Live2D 看板娘 参数设置
-      /  /,  /|  ,  ,    ', Version 1.4.2
-    イ  / /-‐/ i L_ ハ ヽ!  i Update 2018.11.12
-     レ ヘ 7イ`ト  レ'ァ-ト、!ハ|  |
-      !,/7 '0'   ´0iソ|   |
-      |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘
-      レ'| i>.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html
-       レ'| | / k_7_/レ'ヽ, ハ. |
-        | |/i 〈|/  i ,.ヘ | i | Thanks
-       .|/ / i:   ヘ!  \ | journey-ad / https://github.com/journey-ad/live2d_src
-         kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
-        !'〈//`T´', \ `'7'ーr' Live2d Cubism SDK WebGL 2.1 Projrct & All model authors.
-        レ'ヽL__|___i,___,ンレ|ノ
-          ト-,/ |___./
-          'ー'  !_,.:*********************************************************************************/
-
-
- // 后端接口
- live2d_settings['modelAPI'] = '//live2d.fghrsh.net/api/'; // 自建 API 修改这里
- live2d_settings['tipsMessage'] = 'waifu-tips.json'; // 同目录下可省略路径
- live2d_settings['hitokotoAPI'] = 'lwl12.com'; // 一言 API,可选 'lwl12.com', 'hitokoto.cn', 'jinrishici.com'(古诗词)
-
- // 默认模型
- live2d_settings['modelId'] = 1; // 默认模型 ID,可在 F12 控制台找到
- live2d_settings['modelTexturesId'] = 53; // 默认材质 ID,可在 F12 控制台找到
-
- // 工具栏设置
- live2d_settings['showToolMenu'] = true; // 显示 工具栏 ,可选 true(真), false(假)
- live2d_settings['canCloseLive2d'] = true; // 显示 关闭看板娘 按钮,可选 true(真), false(假)
- live2d_settings['canSwitchModel'] = true; // 显示 模型切换 按钮,可选 true(真), false(假)
- live2d_settings['canSwitchTextures'] = true; // 显示 材质切换 按钮,可选 true(真), false(假)
- live2d_settings['canSwitchHitokoto'] = true; // 显示 一言切换 按钮,可选 true(真), false(假)
- live2d_settings['canTakeScreenshot'] = true; // 显示 看板娘截图 按钮,可选 true(真), false(假)
- live2d_settings['canTurnToHomePage'] = true; // 显示 返回首页 按钮,可选 true(真), false(假)
- live2d_settings['canTurnToAboutPage'] = true; // 显示 跳转关于页 按钮,可选 true(真), false(假)
-
- // 模型切换模式
- live2d_settings['modelStorage'] = true; // 记录 ID (刷新后恢复),可选 true(真), false(假)
- live2d_settings['modelRandMode'] = 'switch'; // 模型切换,可选 'rand'(随机), 'switch'(顺序)
- live2d_settings['modelTexturesRandMode']= 'rand'; // 材质切换,可选 'rand'(随机), 'switch'(顺序)
-
- // 提示消息选项
- live2d_settings['showHitokoto'] = true; // 显示一言
- live2d_settings['showF12Status'] = true; // 显示加载状态
- live2d_settings['showF12Message'] = false; // 显示看板娘消息
- live2d_settings['showF12OpenMsg'] = true; // 显示控制台打开提示
- live2d_settings['showCopyMessage'] = true; // 显示 复制内容 提示
- live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词
-
- //看板娘样式设置
- live2d_settings['waifuSize'] = '280x250'; // 看板娘大小,例如 '280x250', '600x535'
- live2d_settings['waifuTipsSize'] = '250x70'; // 提示框大小,例如 '250x70', '570x150'
- live2d_settings['waifuFontSize'] = '12px'; // 提示框字体,例如 '12px', '30px'
- live2d_settings['waifuToolFont'] = '14px'; // 工具栏字体,例如 '14px', '36px'
- live2d_settings['waifuToolLine'] = '20px'; // 工具栏行高,例如 '20px', '36px'
- live2d_settings['waifuToolTop'] = '0px' // 工具栏顶部边距,例如 '0px', '-60px'
- live2d_settings['waifuMinWidth'] = '768px'; // 面页小于 指定宽度 隐藏看板娘,例如 'disable'(禁用), '768px'
- live2d_settings['waifuEdgeSide'] = 'left:0'; // 看板娘贴边方向,例如 'left:0'(靠左 0px), 'right:30'(靠右 30px)
- live2d_settings['waifuDraggable'] = 'disable'; // 拖拽样式,例如 'disable'(禁用), 'axis-x'(只能水平拖拽), 'unlimited'(自由拖拽)
- live2d_settings['waifuDraggableRevert'] = true; // 松开鼠标还原拖拽位置,可选 true(真), false(假)
-
- // 其他杂项设置
- live2d_settings['l2dVersion'] = '1.4.2'; // 当前版本
- live2d_settings['l2dVerDate'] = '2018.11.12'; // 版本更新日期
- live2d_settings['homePageUrl'] = 'auto'; // 主页地址,可选 'auto'(自动), '{URL 网址}'
- live2d_settings['aboutPageUrl'] = 'https://www.fghrsh.net/post/123.html'; // 关于页地址, '{URL 网址}'
- live2d_settings['screenshotCaptureName']= 'live2d.png'; // 看板娘截图文件名,例如 'live2d.png'
-
- /****************************************************************************************************/
-
- String.prototype.render = function(context) {
- var tokenReg = /(\\)?\{([^\{\}\\]+)(\\)?\}/g;
-
- return this.replace(tokenReg, function (word, slash1, token, slash2) {
- if (slash1 || slash2) { return word.replace('\\', ''); }
-
- var variables = token.replace(/\s/g, '').split('.');
- var currentObject = context;
- var i, length, variable;
-
- for (i = 0, length = variables.length; i < length; ++i) {
- variable = variables[i];
- currentObject = currentObject[variable];
- if (currentObject === undefined || currentObject === null) return '';
- }
- return currentObject;
- });
- };
-
- var re = /x/;
- console.log(re);
-
- function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
- function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
-
- function showMessage(text, timeout, flag) {
- if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
- if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
- if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
-
- if(flag) sessionStorage.setItem('waifu-text', text);
-
- $('.waifu-tips').stop();
- $('.waifu-tips').html(text).fadeTo(200, 1);
- if (timeout === undefined) timeout = 5000;
- hideMessage(timeout);
- }
- }
-
- function hideMessage(timeout) {
- $('.waifu-tips').stop().css('opacity',1);
- if (timeout === undefined) timeout = 5000;
- window.setTimeout(function() {sessionStorage.removeItem('waifu-text')}, timeout);
- $('.waifu-tips').delay(timeout).fadeTo(200, 0);
- }
-
- function initModel(waifuPath, type) {
- /* console welcome message */
- eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
-
- /* 判断 JQuery */
- if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
-
- /* 加载看板娘样式 */
- live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
- live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
- live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
-
- $("#live2d").attr("width",live2d_settings.waifuSize[0]);
- $("#live2d").attr("height",live2d_settings.waifuSize[1]);
- $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
- $(".waifu-tips").height(live2d_settings.waifuTipsSize[1]);
- $(".waifu-tips").css("top",live2d_settings.waifuToolTop);
- $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
- $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
- $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
-
- if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
- else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
-
- window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
- if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
-
- try {
- if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
- else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
- else $(".waifu").css("transition", 'all .3s ease-in-out');
- } catch(err) { console.log('[Error] JQuery UI is not defined.') }
-
- live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
- if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
-
- $('.waifu-tool .fui-home').click(function (){
- //window.location = 'https://www.fghrsh.net/';
- window.location = live2d_settings.homePageUrl;
- });
-
- $('.waifu-tool .fui-info-circle').click(function (){
- //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
- window.open(live2d_settings.aboutPageUrl);
- });
-
- if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
- $.ajax({
- cache: true,
- url: waifuPath == '' ? live2d_settings.tipsMessage : (waifuPath.substr(waifuPath.length-15)=='waifu-tips.json'?waifuPath:waifuPath+'waifu-tips.json'),
- dataType: "json",
- success: function (result){ loadTipsMessage(result); }
- });
- }
-
- if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
- if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
- if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
- if (!live2d_settings.canSwitchTextures) $('.waifu-tool .fui-user').hide();
- if (!live2d_settings.canSwitchHitokoto) $('.waifu-tool .fui-chat').hide();
- if (!live2d_settings.canTakeScreenshot) $('.waifu-tool .fui-photo').hide();
- if (!live2d_settings.canTurnToHomePage) $('.waifu-tool .fui-home').hide();
- if (!live2d_settings.canTurnToAboutPage) $('.waifu-tool .fui-info-circle').hide();
-
- if (waifuPath === undefined) waifuPath = '';
- var modelId = localStorage.getItem('modelId');
- var modelTexturesId = localStorage.getItem('modelTexturesId');
-
- if (!live2d_settings.modelStorage || modelId == null) {
- var modelId = live2d_settings.modelId;
- var modelTexturesId = live2d_settings.modelTexturesId;
- } loadModel(modelId, modelTexturesId);
- }
-
- function loadModel(modelId, modelTexturesId=0) {
- if (live2d_settings.modelStorage) {
- localStorage.setItem('modelId', modelId);
- localStorage.setItem('modelTexturesId', modelTexturesId);
- } else {
- sessionStorage.setItem('modelId', modelId);
- sessionStorage.setItem('modelTexturesId', modelTexturesId);
- } loadlive2d('live2d', live2d_settings.modelAPI+'get/?id='+modelId+'-'+modelTexturesId, (live2d_settings.showF12Status ? console.log('[Status]','live2d','模型',modelId+'-'+modelTexturesId,'加载完成'):null));
- }
-
- function loadTipsMessage(result) {
- window.waifu_tips = result;
-
- $.each(result.mouseover, function (index, tips){
- $(document).on("mouseover", tips.selector, function (){
- var text = getRandText(tips.text);
- text = text.render({text: $(this).text()});
- showMessage(text, 3000);
- });
- });
- $.each(result.click, function (index, tips){
- $(document).on("click", tips.selector, function (){
- var text = getRandText(tips.text);
- text = text.render({text: $(this).text()});
- showMessage(text, 3000, true);
- });
- });
- $.each(result.seasons, function (index, tips){
- var now = new Date();
- var after = tips.date.split('-')[0];
- var before = tips.date.split('-')[1] || after;
-
- if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
- (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
- var text = getRandText(tips.text);
- text = text.render({year: now.getFullYear()});
- showMessage(text, 6000, true);
- }
- });
-
- if (live2d_settings.showF12OpenMsg) {
- re.toString = function() {
- showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
- return '';
- };
- }
-
- if (live2d_settings.showCopyMessage) {
- $(document).on('copy', function() {
- showMessage(getRandText(result.waifu.copy_message), 5000, true);
- });
- }
-
- $('.waifu-tool .fui-photo').click(function(){
- showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
- window.Live2D.captureName = live2d_settings.screenshotCaptureName;
- window.Live2D.captureFrame = true;
- });
-
- $('.waifu-tool .fui-cross').click(function(){
- sessionStorage.setItem('waifu-dsiplay', 'none');
- showMessage(getRandText(result.waifu.hidden_message), 1300, true);
- window.setTimeout(function() {$('.waifu').hide();}, 1300);
- });
-
- window.showWelcomeMessage = function(result) {
- var text;
- if (window.location.href == live2d_settings.homePageUrl) {
- var now = (new Date()).getHours();
- if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']);
- else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']);
- else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']);
- else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']);
- else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']);
- else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']);
- else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']);
- else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']);
- else text = getRandText(result.waifu.hour_tips.default);
- } else {
- var referrer_message = result.waifu.referrer_message;
- if (document.referrer !== '') {
- var referrer = document.createElement('a');
- referrer.href = document.referrer;
- var domain = referrer.hostname.split('.')[1];
- if (window.location.hostname == referrer.hostname)
- text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1];
- else if (domain == 'baidu')
- text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1];
- else if (domain == 'so')
- text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1];
- else if (domain == 'google')
- text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1];
- else {
- $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)});
- text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1];
- }
- } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1];
- }
- showMessage(text, 6000);
- }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
-
- var waifu_tips = result.waifu;
-
- function loadOtherModel() {
- var modelId = modelStorageGetItem('modelId');
- var modelRandMode = live2d_settings.modelRandMode;
-
- $.ajax({
- cache: modelRandMode == 'switch' ? true : false,
- url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
- dataType: "json",
- success: function(result) {
- loadModel(result.model['id']);
- var message = result.model['message'];
- $.each(waifu_tips.model_message, function(i,val) {if (i==result.model['id']) message = getRandText(val)});
- showMessage(message, 3000, true);
- }
- });
- }
-
- function loadRandTextures() {
- var modelId = modelStorageGetItem('modelId');
- var modelTexturesId = modelStorageGetItem('modelTexturesId');
- var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
-
- $.ajax({
- cache: modelTexturesRandMode == 'switch' ? true : false,
- url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
- dataType: "json",
- success: function(result) {
- if (result.textures['id'] == 1 && (modelTexturesId == 1 || modelTexturesId == 0))
- showMessage(waifu_tips.load_rand_textures[0], 3000, true);
- else showMessage(waifu_tips.load_rand_textures[1], 3000, true);
- loadModel(modelId, result.textures['id']);
- }
- });
- }
-
- function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
-
- /* 检测用户活动状态,并在空闲时显示一言 */
- if (live2d_settings.showHitokoto) {
- window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
- $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
- setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
- }
-
- function ifActed() {
- if (!hitokotoInterval) {
- hitokotoInterval = true;
- hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
- }
- }
-
- function elseActed() {
- getActed = hitokotoInterval = false;
- window.clearInterval(hitokotoTimer);
- }
-
- function showHitokotoActed() {
- if ($(document)[0].visibilityState == 'visible') showHitokoto();
- }
-
- function showHitokoto() {
- switch(live2d_settings.hitokotoAPI) {
- case 'lwl12.com':
- $.getJSON('https://api.lwl12.com/hitokoto/v1?encode=realjson',function(result){
- if (!empty(result.source)) {
- var text = waifu_tips.hitokoto_api_message['lwl12.com'][0];
- if (!empty(result.author)) text += waifu_tips.hitokoto_api_message['lwl12.com'][1];
- text = text.render({source: result.source, creator: result.author});
- window.setTimeout(function() {showMessage(text+waifu_tips.hitokoto_api_message['lwl12.com'][2], 3000, true);}, 5000);
- } showMessage(result.text, 5000, true);
- });break;
- case 'fghrsh.net':
- $.getJSON('https://api.fghrsh.net/hitokoto/rand/?encode=jsc&uid=3335',function(result){
- if (!empty(result.source)) {
- var text = waifu_tips.hitokoto_api_message['fghrsh.net'][0];
- text = text.render({source: result.source, date: result.date});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
- showMessage(result.hitokoto, 5000, true);
- }
- });break;
- case 'jinrishici.com':
- $.ajax({
- url: 'https://v2.jinrishici.com/one.json',
- xhrFields: {withCredentials: true},
- success: function (result, status) {
- if (!empty(result.data.origin.title)) {
- var text = waifu_tips.hitokoto_api_message['jinrishici.com'][0];
- text = text.render({title: result.data.origin.title, dynasty: result.data.origin.dynasty, author:result.data.origin.author});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
- } showMessage(result.data.content, 5000, true);
- }
- });break;
- default:
- $.getJSON('https://v1.hitokoto.cn',function(result){
- if (!empty(result.from)) {
- var text = waifu_tips.hitokoto_api_message['hitokoto.cn'][0];
- text = text.render({source: result.from, creator: result.creator});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
- }
- showMessage(result.hitokoto, 5000, true);
- });
- }
- }
-
- $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
- $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
- $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
- }
 
spaces/Flux9665/IMS-Toucan/Preprocessing/AudioPreprocessor.py DELETED
@@ -1,166 +0,0 @@
- import librosa
- import librosa.core as lb
- import librosa.display as lbd
- import matplotlib.pyplot as plt
- import numpy
- import numpy as np
- import pyloudnorm as pyln
- import torch
- from torchaudio.transforms import Resample
-
-
- class AudioPreprocessor:
-
- def __init__(self, input_sr, output_sr=None, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False, device="cpu"):
- """
- The parameters are by default set up to do well
- on a 16kHz signal. A different sampling rate may
- require different hop_length and n_fft (e.g.
- doubling frequency --> doubling hop_length and
- doubling n_fft)
- """
- self.cut_silence = cut_silence
- self.device = device
- self.sr = input_sr
- self.new_sr = output_sr
- self.hop_length = hop_length
- self.n_fft = n_fft
- self.mel_buckets = melspec_buckets
- self.meter = pyln.Meter(input_sr)
- self.final_sr = input_sr
- if cut_silence:
- torch.hub._validate_not_a_forked_repo = lambda a, b, c: True # torch 1.9 has a bug in the hub loading, this is a workaround
- # careful: assumes 16kHz or 8kHz audio
- self.silero_model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
- model='silero_vad',
- force_reload=False,
- onnx=False,
- verbose=False)
- (self.get_speech_timestamps,
- self.save_audio,
- self.read_audio,
- self.VADIterator,
- self.collect_chunks) = utils
- self.silero_model = self.silero_model.to(self.device)
- if output_sr is not None and output_sr != input_sr:
- self.resample = Resample(orig_freq=input_sr, new_freq=output_sr).to(self.device)
- self.final_sr = output_sr
- else:
- self.resample = lambda x: x
-
- def cut_silence_from_audio(self, audio):
- """
- https://github.com/snakers4/silero-vad
- """
- return self.collect_chunks(self.get_speech_timestamps(audio, self.silero_model, sampling_rate=self.final_sr), audio)
-
- def to_mono(self, x):
- """
- make sure we deal with a 1D array
- """
- if len(x.shape) == 2:
- return lb.to_mono(numpy.transpose(x))
- else:
- return x
-
- def normalize_loudness(self, audio):
- """
- normalize the amplitudes according to
- their decibels, so this should turn any
- signal with different magnitudes into
- the same magnitude by analysing loudness
- """
- loudness = self.meter.integrated_loudness(audio)
- loud_normed = pyln.normalize.loudness(audio, loudness, -30.0)
- peak = numpy.amax(numpy.abs(loud_normed))
- peak_normed = numpy.divide(loud_normed, peak)
- return peak_normed
-
- def logmelfilterbank(self, audio, sampling_rate, fmin=40, fmax=8000, eps=1e-10):
- """
- Compute log-Mel filterbank
-
- one day this could be replaced by torchaudio's internal log10(melspec(audio)), but
- for some reason it gives slightly different results, so in order not to break backwards
- compatibility, this is kept for now. If there is ever a reason to completely re-train
- all models, this would be a good opportunity to make the switch.
- """
- if isinstance(audio, torch.Tensor):
- audio = audio.numpy()
- # get amplitude spectrogram
- x_stft = librosa.stft(audio, n_fft=self.n_fft, hop_length=self.hop_length, win_length=None, window="hann", pad_mode="reflect")
- spc = np.abs(x_stft).T
- # get mel basis
- fmin = 0 if fmin is None else fmin
- fmax = sampling_rate / 2 if fmax is None else fmax
- mel_basis = librosa.filters.mel(sampling_rate, self.n_fft, self.mel_buckets, fmin, fmax)
- # apply log and return
- return torch.Tensor(np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))).transpose(0, 1)
-
- def normalize_audio(self, audio):
- """
- one function to apply them all in an
- order that makes sense.
- """
- audio = self.to_mono(audio)
- audio = self.normalize_loudness(audio)
- audio = torch.Tensor(audio).to(self.device)
- audio = self.resample(audio)
- if self.cut_silence:
- audio = self.cut_silence_from_audio(audio)
- return audio.to("cpu")
-
- def visualize_cleaning(self, unclean_audio):
- """
- displays Mel Spectrogram of unclean audio
- and then displays Mel Spectrogram of the
- cleaned version.
- """
- fig, ax = plt.subplots(nrows=2, ncols=1)
- unclean_audio_mono = self.to_mono(unclean_audio)
- unclean_spec = self.audio_to_mel_spec_tensor(unclean_audio_mono, normalize=False).numpy()
- clean_spec = self.audio_to_mel_spec_tensor(unclean_audio_mono, normalize=True).numpy()
- lbd.specshow(unclean_spec, sr=self.sr, cmap='GnBu', y_axis='mel', ax=ax[0], x_axis='time')
- ax[0].set(title='Uncleaned Audio')
- ax[0].label_outer()
- if self.new_sr is not None:
- lbd.specshow(clean_spec, sr=self.new_sr, cmap='GnBu', y_axis='mel', ax=ax[1], x_axis='time')
- else:
- lbd.specshow(clean_spec, sr=self.sr, cmap='GnBu', y_axis='mel', ax=ax[1], x_axis='time')
- ax[1].set(title='Cleaned Audio')
- ax[1].label_outer()
- plt.show()
-
- def audio_to_wave_tensor(self, audio, normalize=True):
- if normalize:
- return self.normalize_audio(audio)
- else:
- if isinstance(audio, torch.Tensor):
- return audio
- else:
- return torch.Tensor(audio)
-
- def audio_to_mel_spec_tensor(self, audio, normalize=True, explicit_sampling_rate=None):
- """
- explicit_sampling_rate is for when
- normalization has already been applied
- and that included resampling. No way
- to detect the current sr of the incoming
- audio
- """
- if explicit_sampling_rate is None:
- if normalize:
- audio = self.normalize_audio(audio)
- return self.logmelfilterbank(audio=audio, sampling_rate=self.final_sr)
- return self.logmelfilterbank(audio=audio, sampling_rate=self.sr)
- if normalize:
- audio = self.normalize_audio(audio)
- return self.logmelfilterbank(audio=audio, sampling_rate=explicit_sampling_rate)
-
-
- if __name__ == '__main__':
- import soundfile
-
- wav, sr = soundfile.read("../audios/test.wav")
- ap = AudioPreprocessor(input_sr=sr, output_sr=16000)
- ap.visualize_cleaning(wav)
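The core of the deleted preprocessor is the log-Mel extraction in logmelfilterbank. For reference, the same computation can be written as a standalone function; this is a minimal sketch using the file's default parameters (1024-point FFT, hop 256, 80 Mel bins, 40 to 8000 Hz) and keyword arguments for librosa.filters.mel, which newer librosa releases require.

import librosa
import numpy as np
import torch

def log_mel_spectrogram(audio, sampling_rate, n_fft=1024, hop_length=256,
                        n_mels=80, fmin=40, fmax=8000, eps=1e-10):
    # amplitude spectrogram with frames on the first axis
    x_stft = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length,
                          window="hann", pad_mode="reflect")
    spc = np.abs(x_stft).T
    # Mel filterbank matching the deleted class's defaults
    mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=n_fft,
                                    n_mels=n_mels, fmin=fmin, fmax=fmax)
    # log10 of the Mel energies, returned with shape (n_mels, frames)
    return torch.Tensor(np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))).transpose(0, 1)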