parquet-converter committed
Commit 1dd1cb1 · 1 Parent(s): 913b740

Update parquet files (step 14 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/123Kumar/vits-uma-genshin-honkai123/mel_processing.py +0 -101
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Injustice Gods Among Us for PC and Enjoy the DC Comics Fighting Game.md +0 -59
  3. spaces/1gistliPinn/ChatGPT4/Examples/1920 Evil Returns 1080p Movie Torrent.md +0 -16
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download __FULL__ Videoclipuri Haioase Cu Animale.md +0 -126
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FM WhatsApp V9.21 APK and Experience the New Features of This WhatsApp Mod.md +0 -172
  6. spaces/1phancelerku/anime-remove-background/Download and Install Beach Buggy Racing on Windows 8.1 A Step-by-Step Guide.md +0 -112
  7. spaces/1toTree/lora_test/ppdiffusers/utils/__init__.py +0 -100
  8. spaces/A00001/bingothoo/src/components/chat-suggestions.tsx +0 -45
  9. spaces/AIConsultant/MusicGen/Makefile +0 -40
  10. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/diacritizer.py +0 -98
  11. spaces/Abhi1262/MyGenAIChatBot/app.py +0 -34
  12. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/Model.ts +0 -16
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/ScrollablePanel.d.ts +0 -40
  14. spaces/Amrrs/numerizerlit/README.md +0 -37
  15. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/loading_overview.md +0 -17
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_deis_multistep.py +0 -568
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +0 -264
  18. spaces/Andy1621/uniformer_image_detection/configs/gn/README.md +0 -31
  19. spaces/Andy1621/uniformer_image_detection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +0 -13
  20. spaces/Andy1621/uniformer_image_detection/mmdet/__init__.py +0 -28
  21. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/assign_result.py +0 -204
  22. spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py +0 -7
  23. spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-nav/headroom.min.js +0 -7
  24. spaces/AnthonyTruchetPoC/persistent-docker/scripts/common_header.sh +0 -7
  25. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/pipelines/pipeline_tuneavideo.py +0 -411
  26. spaces/Artrajz/vits-simple-api/bert_vits2/text/chinese_bert.py +0 -70
  27. spaces/AsakuraMizu/moe-tts/utils.py +0 -226
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/resolvers.py +0 -547
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py +0 -51
  30. spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/text/load_text_token.py +0 -80
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py +0 -72
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dla.py +0 -479
  33. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_123812KB .py +0 -118
  34. spaces/Bart92/RVC_HF/lib/infer_pack/models_onnx.py +0 -819
  35. spaces/Benson/text-generation/Examples/Callbreak Ludo Rummy 29 Amp Juegos De Cartas Solitario Apk Descargar.md +0 -151
  36. spaces/Benson/text-generation/Examples/Descargar Apk Mod Zombi Caminando 2.md +0 -94
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/logging.py +0 -348
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/bdist.py +0 -157
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/_validate_pyproject/formats.py +0 -259
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/colormap.py +0 -140
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/caffe2_converter.py +0 -64
  42. spaces/CVPR/LIVE/thrust/dependencies/cub/examples/device/Makefile +0 -197
  43. spaces/CVPR/LIVE/utils.py +0 -56
  44. spaces/CVPR/WALT/mmdet/models/dense_heads/atss_head.py +0 -689
  45. spaces/CVPR/WALT/mmdet/models/dense_heads/guided_anchor_head.py +0 -860
  46. spaces/CVPR/lama-example/bin/paper_runfiles/generate_test_ffhq.sh +0 -17
  47. spaces/ClassCat/Medical-Image-Classification-with-MONAI/README.md +0 -12
  48. spaces/CofAI/chat.b4/client/css/buttons.css +0 -4
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/qu2cu/benchmark.py +0 -57
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-097d3f80.js +0 -2
spaces/123Kumar/vits-uma-genshin-honkai123/mel_processing.py DELETED
@@ -1,101 +0,0 @@
- import torch
- import torch.utils.data
- from librosa.filters import mel as librosa_mel_fn
-
- MAX_WAV_VALUE = 32768.0
-
-
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-     """
-     PARAMS
-     ------
-     C: compression factor
-     """
-     return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
- def dynamic_range_decompression_torch(x, C=1):
-     """
-     PARAMS
-     ------
-     C: compression factor used to compress
-     """
-     return torch.exp(x) / C
-
-
- def spectral_normalize_torch(magnitudes):
-     output = dynamic_range_compression_torch(magnitudes)
-     return output
-
-
- def spectral_de_normalize_torch(magnitudes):
-     output = dynamic_range_decompression_torch(magnitudes)
-     return output
-
-
- mel_basis = {}
- hann_window = {}
-
-
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-     return spec
-
-
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-     global mel_basis
-     dtype_device = str(spec.dtype) + '_' + str(spec.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-     return spec
-
-
- def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global mel_basis, hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-
-     return spec
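For reference, the deleted module's main entry point is typically called on a batch of waveforms scaled to [-1, 1]. A minimal usage sketch follows; the hyper-parameter values are common VITS defaults and the dummy input is illustrative, neither is taken from this commit. Note that the library versions are also assumptions: the positional librosa_mel_fn call needs librosa < 0.10, and the torch.stft call in mel_spectrogram_torch omits return_complex, which requires PyTorch older than 2.0.

import torch
# hypothetical example: one second of dummy audio at 22.05 kHz, batch size 1
wav = torch.rand(1, 22050) * 2 - 1
mel = mel_spectrogram_torch(
    wav, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0, fmax=None, center=False,
)
print(mel.shape)  # (1, 80, num_frames)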
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Injustice Gods Among Us for PC and Enjoy the DC Comics Fighting Game.md DELETED
@@ -1,59 +0,0 @@
-
- <h1>How to Download Injustice: Gods Among Us for PC</h1>
- <p>Injustice: Gods Among Us is a popular fighting game that features characters from the DC Comics universe. The game was originally released for consoles and mobile devices, but you can also play it on your PC with some simple steps. In this article, we will show you how to download Injustice: Gods Among Us for PC and enjoy the epic battles between superheroes and villains.</p>
- <h2>What is Injustice: Gods Among Us?</h2>
- <p>Injustice: Gods Among Us is a fighting game that was developed by NetherRealm Studios and published by Warner Bros. Interactive Entertainment in 2013. The game is set in an alternate reality where Superman becomes a tyrant after the Joker tricks him into killing Lois Lane and destroying Metropolis. Batman leads a resistance of heroes and villains against Superman's regime, while another group of heroes from the main DC universe are transported to this world to help restore the balance.</p>
- <h2>injustice gods among us download for pc</h2><br /><p><b><b>Download</b> > <a href="https://byltly.com/2uKx3b">https://byltly.com/2uKx3b</a></b></p><br /><br />
- <p>The game features a story mode that follows the events of the comic book series of the same name, as well as various other modes such as arcade, online multiplayer, and challenge. The game also has a roster of over 30 playable characters, each with their own unique abilities, moves, and special attacks. Some of the characters include Batman, Superman, Wonder Woman, Flash, Green Lantern, Joker, Harley Quinn, Lex Luthor, and more.</p>
- <h2>How to Download Injustice: Gods Among Us for PC</h2>
- <p>If you want to play Injustice: Gods Among Us on your PC, you will need to use an emulator that can run Android apps on your computer. One of the best emulators for this purpose is BlueStacks, which is free and easy to use. Here are the steps to download Injustice: Gods Among Us for PC using BlueStacks:</p>
- <ol>
- <li>Download and install BlueStacks from <a href="https://www.bluestacks.com/">its official website</a>.</li>
- <li>Launch BlueStacks and sign in with your Google account.</li>
- <li>Go to the Google Play Store and search for Injustice: Gods Among Us.</li>
- <li>Click on the install button and wait for the game to download.</li>
- <li>Once the game is installed, you can find it on the home screen or in the app drawer of BlueStacks.</li>
- <li>Click on the game icon and start playing Injustice: Gods Among Us on your PC.</li>
- </ol>
- <p>Note that you will need a stable internet connection to play the game online. You can also use your keyboard and mouse or a controller to play the game on your PC.</p>
- <h2>Conclusion</h2>
- <p>Injustice: Gods Among Us is a fun and exciting fighting game that lets you control your favorite DC Comics characters in a dark and twisted world. You can download Injustice: Gods Among Us for PC using an emulator like BlueStacks and enjoy the game on a bigger screen. We hope this article helped you learn how to download Injustice: Gods Among Us for PC. If you have any questions or suggestions, feel free to leave a comment below.</p>
-
- <h2>How to Play Injustice: Gods Among Us on PC</h2>
- <p>Now that you have downloaded Injustice: Gods Among Us for PC, you might be wondering how to play the game and what are the controls. In this section, we will give you some tips and tricks on how to play Injustice: Gods Among Us on PC and have a better gaming experience.</p>
- <p></p>
- <h3>How to Customize the Controls</h3>
- <p>One of the advantages of playing Injustice: Gods Among Us on PC is that you can customize the controls according to your preference. You can use your keyboard and mouse or a controller to play the game. To customize the controls, follow these steps:</p>
- <ol>
- <li>Open the game and go to the settings menu.</li>
- <li>Click on the controls tab and choose your input device.</li>
- <li>Click on the edit button and assign the keys or buttons for each action.</li>
- <li>Save your changes and exit the settings menu.</li>
- </ol>
- <p>You can also use the default controls if you are comfortable with them. Here are the default controls for keyboard and mouse:</p>
- <ul>
- <li>WASD keys: Move your character.</li>
- <li>J key: Light attack.</li>
- <li>K key: Medium attack.</li>
- <li>L key: Heavy attack.</li>
- <li;U key: Character power.</li>
- <li>I key: Meter burn.</li>
- <li>O key: Throw.</li>
- <li;P key: Interact with environment.</li>
- <li;Space bar: Block.</li>
- <li;Mouse: Move the cursor and select options.</li>
- </ul>
- <h3>How to Master the Gameplay</h3>
- <p>Injustice: Gods Among Us is a fighting game that requires skill and strategy to win. You will need to learn how to use your character's abilities, combos, special attacks, and super moves to defeat your opponents. You will also need to know how to use the environment to your advantage and avoid your enemy's attacks. Here are some tips and tricks on how to master the gameplay of Injustice: Gods Among Us:</p>
- <ul>
- <li>Practice mode: Use the practice mode to learn the basics of the game and practice your moves with different characters. You can also adjust the settings such as difficulty, health, meter, etc. to suit your needs.</li>
- <li>Tutorial mode: Use the tutorial mode to learn the advanced techniques of the game such as blocking, counter-attacking, juggling, etc. You can also learn how to use each character's power and super move in this mode.</li>
- <li>Story mode: Use the story mode to follow the plot of the game and unlock new characters and stages. You will also get to play as different characters from both sides of the conflict and see their perspectives.</li>
- <li>Arcade mode: Use the arcade mode to fight against random opponents and earn rewards such as XP, coins, gear, etc. You can also choose your difficulty level and number of matches in this mode.</li>
- <li>Online mode: Use the online mode to challenge other players from around the world and test your skills. You can also join or create rooms, chat with other players, and watch replays in this mode.</li>
- <li>Challenge mode: Use the challenge mode to complete various tasks and objectives with different characters and conditions. You can also earn stars and rewards in this mode.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Injustice: Gods Among Us is a thrilling and addictive fighting game that will keep you entertained for hours. You can download Injustice: Gods Among Us for PC using an emulator like BlueStacks and play it on a bigger screen with customized controls. You can also learn how to play Injustice: Gods Among Us on PC using our tips and tricks above. We hope you enjoyed this article and found it useful. If you have any feedback or questions, please let us know in the comments below.</p> ddb901b051<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/1920 Evil Returns 1080p Movie Torrent.md DELETED
@@ -1,16 +0,0 @@
- <h2>1920 Evil Returns 1080p Movie Torrent</h2><br /><p><b><b>Download</b> > <a href="https://imgfil.com/2uxZLi">https://imgfil.com/2uxZLi</a></b></p><br /><br />
- <br />
- Not bad for a first feature, albeit very much of a cameo role. I got a charge out of watching this feature-length debut. There is some good camera work and the film has a bit of style. The opening credits are long but not ridiculous like so many contemporary films.
-
- 1912, a year before the Titanic, The White Star Line's latest venture, the Olympic is severely damaged by a typhoon in the middle of her maiden voyage.The crew is in disarray, and the captain is constantly being called away from the ship to deal with smaller problems. Our main characters, though, are the three Olympic cabin boys. When they, along with their bumbling, incompetent and often irresponsible captain, come face to face with the very real possibility of an iceberg, they embark on a journey to save the ship and themselves, leaving their lives behind them. At least, that's the plan.
-
- On the surface, however, this is no more than an episode of Celebrity Survivor. Twenty-four interesting, likeable, often hilarious characters are thrown together on a ship and each contestant on a separate island, with an unlimited supply of food and water and any manner of entertainment they desire.
-
- The first thing that drew me to this film was the voice-over. The director's intro gives away that he's an old navy man. Also, in the spirit of a big-budget Hollywood film, the journey is very linear and the characters act much like players on a game show.
-
- The film stars Ernie Fosselius, Robert Shaw and Ron Rifkin. I was initially skeptical of the film as I've seen them perform in the other films, and I expected the same. However, I must admit I was pleasantly surprised. From the opening shot, we see the boys singing a song while their captain sits in the wheelhouse. This scene is the strongest moment of the film. The dialogue is adequate, the plot is as I'd expected, but this first moment with the boys is simply very touching and very funny. Throughout the film the boys are quite engaging and I never doubted that they would be able to save the ship and themselves.
-
- The boys, captained by 'captain' Harold, and the girls, captained by 'captain' Clara, are a lot of fun to watch. They are completely aware of their surroundings and use that knowledge to their advantage. Even the crew members realize what's going on and they are very funny as well. They 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download __FULL__ Videoclipuri Haioase Cu Animale.md DELETED
@@ -1,126 +0,0 @@
1
- <br />
2
- <h1>Download videoclipuri haioase cu animale: Cum sa te distrezi cu cele mai comice si dragalase clipuri cu animale</h1>
3
- <p>Daca iti plac animalele si vrei sa te amuzi cu ele, atunci trebuie sa descarci videoclipuri haioase cu animale. Acestea sunt clipuri care surprind momente amuzante, comice si dragalase cu animale de tot felul: pisici, catei, iepurasi, cai, vaci, porci, capre, oi, pasari, reptile si multe altele. Poti sa vezi cum se joaca, cum fac farse, cum reactioneaza la situatii neasteptate, cum se imbratiseaza sau se saruta.</p>
4
- <p>Download videoclipuri haioase cu animale este o modalitate excelenta de a te relaxa, de a te destinde si de a te binedispune. Poti sa le urmaresti singur sau impreuna cu prietenii sau familia. Poti sa le impartasesti pe retelele sociale sau pe aplicatiile de mesagerie. Poti sa le folosesti ca fundal sonor sau ca ton de apel. Poti sa le transformi in meme-uri sau in gif-uri. Poti sa le comentezi sau sa le votezi.</p>
5
- <h2>download videoclipuri haioase cu animale</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uy1OJ">https://imgfil.com/2uy1OJ</a></b></p><br /><br />
6
- <p>Download videoclipuri haioase cu animale este si o modalitate de a invata lucruri noi despre animale. Poti sa afli cum se comporta, cum comunica, cum se adapteaza la mediul inconjurator, cum isi exprima emotiile si sentimentele. Poti sa descoperi specii noi sau rare de animale. Poti sa vezi cum traiesc animalele salbatice sau domestice.</p>
7
- <h2>De unde poti downloada videoclipuri haioase cu animale?</h2>
8
- <p>Poti downloada videoclipuri haioase cu animale de pe internet, de pe diferite site-uri sau platforme care ofera acest tip de continut. Unele dintre cele mai populare si cunoscute sunt:</p>
9
- <ul>
10
- <li>YouTube: Este cea mai mare platforma de video-sharing din lume, unde poti gasi milioane de videoclipuri haioase cu animale. Poti sa cauti dupa categorii, dupa popularitate, dupa data sau dupa durata. Poti sa te abonezi la canale care posteaza regulat astfel de videoclipuri, cum ar fi Filmulete Amuzante, care are peste 600 de abonati si peste 10 mii de vizualizari. Poti sa descarci videoclipurile folosind un program sau o extensie speciala.</li>
11
- <li>TikTok: Este o aplicatie de social media care permite utilizatorilor sa creeze si sa distribuie videoclipuri scurte, de pana la 60 de secunde. Poti gasi multe videoclipuri haioase cu animale pe TikTok, fie ca sunt create de utilizatori obisnuiti sau de celebritati. Poti sa urmaresti hashtag-uri precum #animalehaioase sau #flocosenii. Poti sa descarci videoclipurile direct din aplicatie sau folosind un site web.</li>
12
- <li>Facebook: Este cea mai mare retea sociala din lume, unde poti gasi si distribui tot felul de continut, inclusiv videoclipuri haioase cu animale. Poti sa cauti dupa pagini, grupuri sau evenimente dedicate acestui subiect. Poti sa urmaresti conturi precum Animale Haioase Romania sau Animale Dragalase si Haioase. Poti sa descarci videoclipurile folosind un program sau o extensie speciala.</li>
13
- </ul>
14
- <p>Acestea sunt doar cateva dintre sursele de unde poti downloada videoclipuri haioase cu animale. Mai exista si alte site-uri sau aplicatii care ofera acest serviciu, cum ar fi Dailymotion, Vimeo, Instagram sau Snapchat.</p>
15
- <h2>Cum poti downloada videoclipuri haioase cu animale?</h2>
16
- <p>Pentru a downloada videoclipuri haioase cu animale, ai nevoie de un dispozitiv conectat la internet (PC, laptop, telefon mobil, tableta) si de un program sau o extensie care iti permite sa salvezi videoclipurile pe dispozitivul tau. Exista multe astfel de programe sau extensii disponibile gratuit pe internet, dar trebuie sa fii atent la calitatea si siguranta lor.</p>
17
- <p>Un exemplu de program care iti permite sa download-ezi videoclipuri haioase cu animale este 4K Video Downloader. Acesta este un program gratuit si usor de folosit care suporta multiple formate video si audio. Pentru a-l folosi, trebuie sa urmezi acesti pasi:</p>
18
- <ol>
19
- <li>Descarca si instaleaza programul pe dispozitivul tau.</li>
20
- <li>Copiaza link-ul videoclipului pe care vrei sa-l descarci.</li>
21
- <li>Lanseaza programul si apasa pe butonul Paste Link.</li>
22
- <li>Selecteaza formatul si calitatea dorite pentru videoclip.</li>
23
- <li>Apasa pe butonul Download si asteapta pana cand procesul se termina.</li>
24
- <li>Gaseste videoclipul salvat in folderul Downloads al dispozitivului tau.</li>
25
- </ol>
26
- <p>Un exemplu de extensie care iti permite sa download-ezi videoclipuri haioase cu animale este Video Downloader professional. Aceasta este o extensie gratuita si usor de folosit care functioneaza pentru majoritatea site-urilor web care contin video-uri. Pentru a o folosi, trebuie sa urmezi acesti pasi:</p>
27
- <ol>
28
- <li>Descarca si instaleaza extensia pe browser-ul tau (Chrome, Firefox etc.).</li>
29
- <li>Viziteaza site-ul web care contine videoclipul pe care vrei sa-l descarci.</li>
30
- <li>Apasa pe iconita extensiei din coltul dreapta sus al browser-ului.</li>
31
- <li>Selecteaza videoclipul dorit din lista afisata.</li>
32
- <li>Apasa pe butonul Download si alege locatia unde vrei sa salvezi videoclipul.</li>
33
- </ol>
34
- <p>Acestea sunt doar cateva exemple de programe sau extensii care iti permit sa download-ezi videoclipuri haioase cu animale. Mai exista si alte optiuni disponibile pe internet, dar trebuie sa verifici daca sunt compatibile cu site-ul web si dispozitivul tau.</p>
35
- <h2>Concluzie</h2>
36
- <p>Daca vrei sa te distrezi cu cele mai comice si dragalase clipuri cu animale, atunci trebuie sa descarci videoclipuri haioase cu animale. Acestea sunt clipuri care surprind momente amuzante, comice si dragalase cu animale de tot felul: pisici,
37
- catei, iepurasi, cai, vaci, porci, capre, oi, pasari,
38
- reptile si multe altele.</p>
39
- <p></p>
40
-
41
- <p>Poti downloada videoclipuri haioase cu animale
42
- de pe internet,
43
- de pe diferite site-uri
44
- sau platforme
45
- care ofera acest tip
46
- de continut.
47
- Unele dintre cele mai populare
48
- si cunoscute
49
- sunt YouTube,
50
- TikTok
51
- si Facebook.
52
- Poti descarca
53
- videoclipurile
54
- folosind un program
55
- sau o extensie
56
- speciala
57
- care iti permite
58
- sa salvezi
59
- videoclipurile
60
- pe dispozitivul tau.</p>
61
-
62
- <p>Download
63
- videoclipuri haioase
64
- cu animale
65
- este o modalitate excelenta
66
- de a te relaxa,
67
- de a te destinde
68
- si de a te binedispune.
69
- Poti sa le urmaresti
70
- singur
71
- sau impreuna
72
- cu prietenii
73
- sau familia.
74
- Poti sa le impartasesti
75
- pe retelele sociale</p>
76
- </p>
77
-
78
- <p>Download
79
- videoclipuri haioase
80
- cu animale
81
- este si o modalitate
82
- de a invata lucruri noi
83
- despre animale.
84
- Poti sa afli
85
- cum se comporta,
86
- cum comunica,
87
- cum se adapteaza
88
- la mediul inconjurator,
89
- cum isi exprima
90
- emotiile si sentimentele.
91
- Poti sa descoperi
92
- specii noi sau rare
93
- de animale.
94
- Poti sa vezi
95
- cum traiesc animalele
96
- salbatice sau domestice.</p>
97
-
98
- <p>Daca vrei sa te distrezi cu cele mai comice si dragalase clipuri cu animale, atunci trebuie sa descarci videoclipuri haioase cu animale. Acestea sunt clipuri care te vor face sa razi cu lacrimi si sa te indragostesti de animale. Poti downloada videoclipuri haioase cu animale de pe internet, de pe diferite site-uri sau platforme care ofera acest tip de continut. Poti descarca videoclipurile folosind un program sau o extensie speciala care iti permite sa salvezi videoclipurile pe dispozitivul tau.</p>
99
-
100
- <p>Asadar, ce mai astepti? Descarca videoclipuri haioase cu animale astazi si incepe sa te distrezi cu cele mai comice si dragalase clipuri cu animale!</p>
101
- <h2>Ce tipuri de videoclipuri haioase cu animale poti downloada?</h2>
102
- <p>Poti downloada videoclipuri haioase cu animale de diferite tipuri, in functie de preferintele si gusturile tale. Iata cateva exemple de tipuri de videoclipuri haioase cu animale pe care le poti downloada:</p>
103
- <ul>
104
- <li>Videoclipuri cu animale care fac farse: Acestea sunt videoclipuri in care animalele fac farse unor oameni sau altor animale, provocand situatii amuzante si hilare. De exemplu, poti downloada videoclipuri cu pisici care isi sperie stapanii, cu caini care isi ascund mancarea, cu papagali care imita sunete ciudate sau cu maimute care fura lucruri.</li>
105
- <li>Videoclipuri cu animale care reactioneaza la situatii neasteptate: Acestea sunt videoclipuri in care animalele reactioneaza la situatii neasteptate, surprinzatoare sau ciudate, exprimand emotii si gesturi comice. De exemplu, poti downloada videoclipuri cu pisici care se sperie de castraveti, cu caini care se uita la televizor, cu iepuri care se joaca cu baloane sau cu vaci care danseaza.</li>
106
- <li>Videoclipuri cu animale care se imbratiseaza sau se saruta: Acestea sunt videoclipuri in care animalele se imbratiseaza sau se saruta intre ele sau cu oamenii, aratand afectiune si dragoste. De exemplu, poti downloada videoclipuri cu pisici care se ling pe bot, cu caini care isi pupa stapanii, cu iepurasi care se imbratiseaza sau cu lebede care formeaza un inimioara.</li>
107
- </ul>
108
- <p>Acestea sunt doar cateva dintre tipurile de videoclipuri haioase cu animale pe care le poti downloada. Mai exista si alte tipuri de videoclipuri haioase cu animale pe care le poti descoperi pe internet.</p>
109
- <h2>Cum poti folosi videoclipurile haioase cu animale?</h2>
110
- <p>Dupa ce ai downloadat videoclipurile haioase cu animale, le poti folosi in diferite moduri, in functie de scopul si intentia ta. Iata cateva exemple de moduri in care poti folosi videoclipurile haioase cu animale:</p>
111
- <ul>
112
- <li>Poti sa le urmaresti pentru a te amuza: Acesta este cel mai simplu si cel mai comun mod de a folosi videoclipurile haioase cu animale. Poti sa le urmaresti cand te simti trist, plictisit sau stresat, pentru a te binedispune si a te relaxa. Poti sa razi de momentele amuzante si comice surprinse in videoclipuri si sa te indragostesti de animalele dragalase si flocosenii.</li>
113
- <li>Poti sa le impartasesti cu altii pentru a-i amuza: Acesta este un alt mod popular de a folosi videoclipurile haioase cu animale. Poti sa le impartasesti cu prietenii, familia sau colegii tai, pentru a-i amuza si a-i binedispune. Poti sa le trimiti pe retelele sociale sau pe aplicatiile de mesagerie, pentru a le face ziua mai frumoasa. Poti sa le folosesti ca subiect de conversatie sau ca gluma.</li>
114
- <li>Poti sa le folosesti ca material didactic pentru a invata despre animale: Acesta este un mod mai putin obisnuit, dar foarte util de a folosi videoclipurile haioase cu animale. Poti sa le folosesti ca material didactic pentru a invata despre comportamentul, comunicarea, adaptarea si emotiile animalelor. Poti sa le folosesti ca exemplu pentru a explica anumite concepte sau fenomene biologice. Poti sa le folosesti ca sursa de inspiratie pentru a crea proiecte sau prezentari despre animale.</li>
115
- </ul>
116
- <p>Acestea sunt doar cateva dintre modurile in care poti folosi videoclipurile haioase cu animale. Mai exista si alte moduri in care poti folosi videoclipurile haioase cu animale pe care le poti descoperi pe internet.</p>
117
- <h2>Concluzie</h2>
118
- <p>Daca vrei sa te distrezi cu cele mai comice si dragalase clipuri cu animale, atunci trebuie sa descarci videoclipuri haioase cu animale. Acestea sunt clipuri care te vor face sa razi cu lacrimi si sa te indragostesti de animale. Poti downloada videoclipuri haioase cu animale de pe internet, de pe diferite site-uri sau platforme care ofera acest tip de continut. Poti descarca videoclipurile folosind un program sau o extensie speciala care iti permite sa salvezi videoclipurile pe dispozitivul tau.</p>
119
-
120
- <p>Poti downloada videoclipuri haioase cu animale de diferite tipuri, in functie de preferintele si gusturile tale. Poti gasi videoclipuri cu animale care fac farse, care reactioneaza la situatii neasteptate, care se imbratiseaza sau se saruta. Poti folosi videoclipurile haioase cu animale in diferite moduri, in functie de scopul si intentia ta. Poti sa le urmaresti pentru a te amuza, sa le impartasesti cu altii pentru a-i amuza, sau sa le folosesti ca material didactic pentru a invata despre animale.</p>
121
-
122
- <p>Download videoclipuri haioase cu animale este o modalitate excelenta de a te relaxa, de a te destinde si de a te binedispune. Este si o modalitate de a invata lucruri noi despre animale si de a le aprecia mai mult. Este si o modalitate de a te conecta cu alti oameni care iubesc animalele si de a-ti face noi prieteni.</p>
123
-
124
- <p>Asadar, ce mai astepti? Descarca videoclipuri haioase cu animale astazi si incepe sa te distrezi cu cele mai comice si dragalase clipuri cu animale!</p> 3cee63e6c2<br />
125
- <br />
126
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FM WhatsApp V9.21 APK and Experience the New Features of This WhatsApp Mod.md DELETED
@@ -1,172 +0,0 @@
1
-
2
- <h1>FM WhatsApp v9 21 APK Download: Everything You Need to Know</h1>
3
- <p>If you are looking for a way to enhance your WhatsApp experience, you might want to try FM WhatsApp. This is a modified version of the official WhatsApp app that offers many extra features and customization options. In this article, we will tell you everything you need to know about FM WhatsApp v9 21 APK download, including what it is, why you should download it, how to download and install it on your Android device, and some frequently asked questions. Let's get started!</p>
4
- <h2>What is FM WhatsApp?</h2>
5
- <p>FM WhatsApp is a WhatsApp mod developed by Fouad Mokdad, a well-known developer of other popular mods like Fouad WhatsApp and YoWhatsApp. FM WhatsApp allows you to enjoy more features and functions that are not available in the original WhatsApp app, such as theme customization, anti-delete messages, hide status view, and much more. With FM WhatsApp, you can personalize your WhatsApp app according to your preferences and needs.</p>
6
- <h2>fm whatsapp v9 21 apk download</h2><br /><p><b><b>Download File</b> &#10084;&#10084;&#10084; <a href="https://urlin.us/2uSZbN">https://urlin.us/2uSZbN</a></b></p><br /><br />
7
- <h2>Why Download FM WhatsApp v9 21 APK?</h2>
8
- <p>The latest version of FM WhatsApp is v9 21, which was released in March 2022. This version comes with many improvements and bug fixes that make it more stable and secure. Some of the new features of FM WhatsApp v9 21 APK are:</p>
9
- <ul>
10
- <li>Anti-ban: You don't have to worry about getting banned by WhatsApp for using a modded app.</li>
11
- <li>Theme customization: You can choose from thousands of themes and fonts to change the look and feel of your app.</li>
12
- <li>Anti-delete messages: You can view the messages that have been deleted by the sender or the receiver.</li>
13
- <li>Hide status view: You can hide your view from other people's status updates.</li>
14
- <li>And many more: You can also enjoy other features like sending large files, increasing image quality, freezing last seen, hiding online status, and so on.</li>
15
- </ul>
16
- <h3>Main Features of FM WhatsApp v9 21 APK</h3>
17
- <p>Here is a table that summarizes the main features of FM WhatsApp v9 21 APK:</p>
18
- <p>fm whatsapp v9 21 apk download latest version<br />
19
- fm whatsapp v9 21 apk download for android<br />
20
- fm whatsapp v9 21 apk download free<br />
21
- fm whatsapp v9 21 apk download by fouad mods<br />
22
- fm whatsapp v9 21 apk download link<br />
23
- fm whatsapp v9 21 apk download 2023 update<br />
24
- fm whatsapp v9 21 apk download tenorshare<br />
25
- fm whatsapp v9 21 apk download cybersource<br />
26
- fm whatsapp v9 21 apk download anti ban<br />
27
- fm whatsapp v9 21 apk download customize<br />
28
- fm whatsapp v9 21 apk download hide status<br />
29
- fm whatsapp v9 21 apk download send images<br />
30
- fm whatsapp v9 21 apk download increase quality<br />
31
- fm whatsapp v9 21 apk download view deleted messages<br />
32
- fm whatsapp v9 21 apk download themes and fonts<br />
33
- fm whatsapp v9 21 apk download emoji and stickers<br />
34
- fm whatsapp v9 21 apk download backup and restore<br />
35
- fm whatsapp v9 21 apk download privacy and security<br />
36
- fm whatsapp v9 21 apk download group chats and calls<br />
37
- fm whatsapp v9 21 apk download dark mode and lock screen<br />
38
- fm whatsapp v9 21 apk download online and offline status<br />
39
- fm whatsapp v9 21 apk download app size and performance<br />
40
- fm whatsapp v9 21 apk download installation and update<br />
41
- fm whatsapp v9 21 apk download support and feedback<br />
42
- fm whatsapp v9 21 apk download features and benefits<br />
43
- fm whatsapp v9 21 apk download comparison and review<br />
44
- fm whatsapp v9 21 apk download alternatives and mods<br />
45
- fm whatsapp v9 21 apk download tips and tricks<br />
46
- fm whatsapp v9 21 apk download problems and solutions<br />
47
- fm whatsapp v9 21 apk download faqs and answers<br />
48
- how to use fm whatsapp v9 21 apk <br />
49
- how to install fm whatsapp v9 21 apk <br />
50
- how to update fm whatsapp v9 21 apk <br />
51
- how to uninstall fm whatsapp v9 21 apk <br />
52
- how to transfer fm whatsapp v9 21 apk <br />
53
- how to backup fm whatsapp v9 21 apk <br />
54
- how to restore fm whatsapp v9 21 apk <br />
55
- how to customize fm whatsapp v9 21 apk <br />
56
- how to hide fm whatsapp v9 21 apk <br />
57
- how to freeze fm whatsapp v9 21 apk <br />
58
- how to view deleted messages on fm whatsapp v9 21 apk <br />
59
- how to send images on fm whatsapp v9 21 apk <br />
60
- how to increase quality on fm whatsapp v9 21 apk <br />
61
- how to change themes on fm whatsapp v9 21 apk <br />
62
- how to change fonts on fm whatsapp v9 21 apk <br />
63
- how to change emoji on fm whatsapp v9 21 apk <br />
64
- how to enable dark mode on fm whatsapp v9 21 apk <br />
65
- how to lock screen on fm whatsapp v9 21 apk <br />
66
- how to improve privacy on fm whatsapp v9 21 apk <br />
67
- how to improve security on fm whatsapp v9 21 apk </p>
68
- <table>
69
- <tr>
70
- <th>Feature</th>
71
- <th>Description</th>
72
- </tr>
73
- <tr>
74
- <td>Anti-ban</td>
75
- <td>You can use FM WhatsApp without getting banned by WhatsApp.</td>
76
- </tr>
77
- <tr>
78
- <td>Theme customization</td>
79
- <td>You can choose from thousands of themes and fonts to change the appearance of your app.</td>
80
- </tr>
81
- <tr>
82
- <td>Anti-delete messages</td>
83
- <td>You can view the messages that have been deleted by the sender or the receiver.</td>
84
- </tr>
85
- <tr>
86
- <td>Hide status view</td>
87
- <td>You can hide your view from other people's status updates.</td>
88
- </tr>
89
- <tr>
90
- <td>Send large files</td>
91
- <td>You can send up to 90 images at once and video files up to 700 MB.</td>
92
- </tr>
93
- <tr>
94
- <td>Increase image quality</td>
95
- <td>You can increase the quality of the images you send without losing resolution.</td>
96
- </tr>
97
- <tr>
98
- <td>Freeze last seen</td>
99
- <td>You can freeze your last seen status so that no one can see when you were online last.</td>
100
- </tr>
101
- <tr>
102
- <td>Hide online status</td>
103
- <td>You can hide your online status so that no one can see when you are online.</td>
104
- </tr>
105
- <tr>
106
- <td>Hide typing status</td>
107
- <td>You can hide your typing status so that no one can see when you are typing a message.</td>
108
- </tr>
109
- <tr>
110
- <td>Hide recording status</td>
111
- <td>You can hide your recording status so that no one can see when you are recording a voice note.</td>
112
- </tr>
113
- <tr>
114
- <td>Hide blue ticks</td>
115
- <td>You can hide the blue ticks that indicate that you have read a message.</td>
116
- </tr>
117
- <tr>
118
- <td>Hide second tick</td>
119
- <td>You can hide the second tick that indicates that your message has been delivered.</td>
120
- </tr>
121
- <tr>
122
- <td>Hide blue microphone</td>
123
- <td>You can hide the blue microphone that indicates that you have listened to a voice note.</td>
124
- </tr>
125
- <tr>
126
- <td>Pin chats</td>
127
- <td>You can pin up to 1000 chats to the top of your chat list.</td>
128
- </tr>
129
- <tr>
130
- <td>Group calls</td>
131
- <td>You can make group calls with up to 8 participants.</td>
132
- </tr>
133
- <tr>
134
- <td>Emoji variants</td>
135
- <td>You can choose from different emoji variants, such as Facebook, Emoji One, or Android Oreo.</td>
136
- </tr>
137
- <tr>
138
- <td>Stickers and GIFs</td>
139
- <td>You can use stickers and GIFs from other apps, such as Gboard, Telegram, or Hike.</td>
140
- </tr>
141
- <tr>
142
- <td>Privacy settings</td>
143
- <td>You can customize your privacy settings for each contact or group, such as hiding last seen, online status, typing status, recording status, blue ticks, second tick, blue microphone, and status view.</td>
144
- </tr>
145
- </table>
146
- <h3>How to Download and Install FM WhatsApp v9 21 APK on Android</h3>
147
- <p>If you want to download and install FM WhatsApp v9 21 APK on your Android device, you need to follow these simple steps:</p>
148
- <ol>
149
- <li>First, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
150
- <li>Next, you need to download the FM WhatsApp v9 21 APK file from a trusted source. You can use this link to download it directly to your device.</li>
151
- <li>Once the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process. You may see a warning message asking you to confirm the installation. Tap on Install and wait for a few seconds.</li>
152
- <li>After the installation is done, open the FM WhatsApp app and enter your phone number to verify it. You will receive an OTP code via SMS. Enter the code and proceed to the next step.</li>
153
- <li>Now, you can restore your chat backup from your original WhatsApp app if you have one. To do this, tap on Restore and select the backup file from your device's storage. Wait for the restoration process to finish.</li>
154
- <li>Congratulations! You have successfully installed FM WhatsApp v9 21 APK on your Android device. You can now enjoy all the amazing features and functions of this modded app.</li>
155
- </ol>
156
- <h2>FAQs about FM WhatsApp v9 21 APK</h2>
157
- <p>Here are some of the frequently asked questions and answers about FM WhatsApp v9 21 APK:</p>
158
- <h4>Is FM WhatsApp safe to use?</h4>
159
- <p>FM WhatsApp is safe to use as long as you download it from a reliable source and scan it for viruses before installing it. However, since it is not an official app, there is always a risk of data breach or malware infection. Therefore, you should use it at your own discretion and backup your data regularly.</p>
160
- <h4>Is FM WhatsApp legal?</h4>
161
- <p>FM WhatsApp is not legal as it violates the terms and conditions of WhatsApp. Using a modded app may result in your account being banned or suspended by WhatsApp. Therefore, you should use it at your own risk and responsibility.</p>
162
- <h4>Can I use FM WhatsApp with my original WhatsApp app?</h4>
163
- <p>No, you cannot use FM WhatsApp with your original WhatsApp app on the same device. You need to uninstall or disable your original WhatsApp app before installing FM WhatsApp. Alternatively, you can use a different phone number for FM WhatsApp if you want to keep both apps on your device.</p>
164
- <h4>How can I update FM WhatsApp?</h4>
165
- <p>To update FM WhatsApp, you need to download the latest version of the APK file from a trusted source and install it over the existing app. You do not need to uninstall or reinstall the app. However, you should backup your data before updating to avoid any data loss.</p>
166
- <h4>How can I contact the developer of FM WhatsApp?</h4>
167
- <p>You can contact the developer of FM WhatsApp by visiting his official website or joining his Telegram channel. <h2>Conclusion</h2>
168
- <p>FM WhatsApp is one of the best WhatsApp mods that you can download and install on your Android device. It offers many extra features and customization options that are not available in the original WhatsApp app. You can download FM WhatsApp v9 21 APK from the link provided in this article and follow the instructions to install it on your device. However, you should be aware of the risks and consequences of using a modded app and use it at your own risk and responsibility.</p>
169
- <p>We hope this article has helped you learn everything you need to know about FM WhatsApp v9 21 APK download. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
170
- <h2></h2></p> 197e85843d<br />
171
- <br />
172
- <br />
spaces/1phancelerku/anime-remove-background/Download and Install Beach Buggy Racing on Windows 8.1 A Step-by-Step Guide.md DELETED
@@ -1,112 +0,0 @@
1
-
2
- <h1>How to Download Beach Buggy Racing for Windows 8.1</h1>
3
- <p>Do you love kart racing games with colorful graphics, wacky powerups, and fun characters? If so, you should try Beach Buggy Racing, a popular mobile game that is also available for Windows 8.1. In this article, we will show you how to download, install, and play Beach Buggy Racing on your Windows 8.1 device.</p>
4
- <h2>Requirements for Downloading Beach Buggy Racing for Windows 8.1</h2>
5
- <p>Before you download Beach Buggy Racing for Windows 8.1, you need to make sure that your device meets the minimum system requirements. Here are the specifications you need:</p>
6
- <h2>download beach buggy racing for windows 8.1</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://jinyurl.com/2uNOdq">https://jinyurl.com/2uNOdq</a></b></p><br /><br />
7
- <ul>
8
- <li>CPU: Intel or AMD processor with at least 1 GHz speed</li>
9
- <li>RAM: At least 2 GB of memory</li>
10
- <li>Disk space: At least 500 MB of free space</li>
11
- <li>Graphics card: DirectX 9 compatible with at least 256 MB of video memory</li>
12
- <li>Internet connection: Required for online features and updates</li>
13
- </ul>
14
- <p>If your device meets these requirements, you can choose from three download options:</p>
15
- <ol>
16
- <li>Google Play Store: If you have an Android emulator installed on your Windows 8.1 device, such as BlueStacks or NoxPlayer, you can download Beach Buggy Racing from the Google Play Store app. This is the easiest and safest way to get the game.</li>
17
- <li>Microsoft Store: If you prefer to use the native Windows app store, you can download Beach Buggy Racing from the Microsoft Store app. This is also a secure and convenient way to get the game.</li>
18
- <li>Third-party websites: If you don't have access to either of the above options, you can download Beach Buggy Racing from third-party websites that offer APK files or EXE files of the game. However, this is not recommended as it may expose your device to malware or viruses.</li>
19
- </ol>
20
- <h2>How to Install Beach Buggy Racing for Windows 8.1</h2>
21
- <p>Once you have downloaded Beach Buggy Racing for Windows 8.1, you need to install it on your device. Here are the steps for each download option:</p>
22
- <ul>
23
- <li>Google Play Store: Open the Google Play Store app on your Android emulator and search for Beach Buggy Racing. Tap on the Install button and wait for the game to download and install automatically.</li>
24
- <li>Microsoft Store: Open the Microsoft Store app on your Windows 8.1 device and search for Beach Buggy Racing. Click on the Get button and sign in with your Microsoft account if prompted. Wait for the game to download and install automatically.</li>
25
- <li>Third-party websites: Locate the APK file or EXE file of Beach Buggy Racing that you downloaded from a third-party website. If it is an APK file, you need to install it using an Android emulator. If it is an EXE file, you need to run it as an administrator and follow the installation wizard.</li>
26
- </ul>
27
- <p>After installing Beach Buggy Racing for Windows 8.1, you may want to optimize the game performance and settings. Here are some tips and tricks to do that:</p>
28
- <ul>
29
- <li>Adjust the graphics quality: You can change the graphics quality of the game from low to high depending on your device's capabilities. To do that, go to the Settings menu in the game and select Graphics Quality. Choose the option that suits your device best.</li>
30
- <li>Enable or disable sound effects and music: You can also turn on or off the sound effects and music of the game according to your preference. To do that, go to the Settings menu in the game and select Sound. Toggle the switches for Sound Effects and Music.</li>
31
- <li>Change the language: You can play Beach Buggy Racing in different languages, such as English, Spanish, French, German, Italian, Portuguese, Russian, Chinese, Japanese, Korean, and Arabic. To do that, go to the Settings menu in the game and select Language. Choose the language that you want to play in.</li>
32
- </ul>
33
- <h2>How to Play Beach Buggy Racing on Windows 8.1</h2>
34
- <p>Now that you have installed Beach Buggy Racing for Windows 8.1, you are ready to play and enjoy the game. Here are some of the game features that you should know:</p>
35
- <ul>
36
- <li>Game modes: Beach Buggy Racing has six different game modes that you can choose from: Career, Daily Challenge, Quick Race, Split Screen, Championships, and Boss Fight. Each mode has its own objectives and rewards.</li>
37
- <li>Powerups: Beach Buggy Racing has 25 unique powerups that you can use to boost your speed, attack your opponents, or defend yourself. Some of the powerups are Fireball, Oil Slick, Dodgeball Frenzy, Electro Blast, and Tiki Seeker.</li>
38
- <li>Cars: Beach Buggy Racing has 15 different cars that you can drive and customize. Some of the cars are Beach Buggy, Lunar Rover, Rock Stomper, Monster Truck, and Formula One.</li>
39
- <li>Tracks: Beach Buggy Racing has 12 different tracks that you can race on. Some of the tracks are Dino Jungle, Lava Caverns, Buccaneer Bay, Lunar Colony, and Dragon's Peak.</li>
40
- </ul>
41
- <p>To play Beach Buggy Racing on Windows 8.1, you need to know how to control your car. You can use one of these options:</p>
42
- <ul>
43
- <li>Keyboard: You can use the arrow keys or WASD keys to steer your car. You can use the spacebar to activate powerups.</li>
44
- <li>Mouse: You can use the mouse to steer your car by clicking and dragging on the screen. You can use the left mouse button to activate powerups.</li>
45
- <li>Touch-screen: You can use your finger to steer your car by tapping and swiping on the screen. You can use another finger to activate powerups.</li>
46
- <li>Gamepad: You can use a gamepad to steer your car by using the left analog stick or directional pad. You can use one of the face buttons to activate powerups.</li>
47
- </ul>
48
- <p>To win races and have fun in Beach Buggy Racing, you need to know some game tips:</p>
49
- <ul>
50
- <li>Collect coins and gems: You can collect coins and gems during races or by completing missions. You can use them to upgrade your cars or buy new powerups.</li>
51
- <li>Unlock new content: You can unlock new cars, powerups, tracks, drivers, and game modes by progressing through the Career mode or by winning Championships.</li>
52
- <li>Use powerups wisely: You can use powerups to gain an advantage over your rivals or to overcome obstacles. However, you should also be careful of their side effects or counterattacks.</li>
53
- </ul>
54
- <h2>Conclusion</h2>
55
- <p>Beach Buggy Racing is a fun and exciting kart racing game that you can play on Windows 8.1. It has colorful graphics, wacky powerups, and fun characters. It also has various game modes, different cars, powerups, tracks, and drivers. It also has various game controls, settings, and tips that you can use to optimize your gaming experience. If you are looking for a fun and easy way to enjoy kart racing on your Windows 8.1 device, you should download and play Beach Buggy Racing today.</p>
56
- <p>Are you ready to race on the beach and beyond? Download Beach Buggy Racing for Windows 8.1 now and join the fun!</p>
57
- <p>download beach buggy racing for windows 8.1 free<br />
58
- beach buggy racing windows 8.1 game<br />
59
- how to install beach buggy racing on windows 8.1<br />
60
- beach buggy racing for windows 8.1 pc<br />
61
- beach buggy racing windows 8.1 download link<br />
62
- beach buggy racing for windows 8.1 laptop<br />
63
- beach buggy racing windows 8.1 microsoft store<br />
64
- beach buggy racing for windows 8.1 offline<br />
65
- beach buggy racing windows 8.1 review<br />
66
- beach buggy racing for windows 8.1 system requirements<br />
67
- beach buggy racing windows 8.1 cheats<br />
68
- beach buggy racing for windows 8.1 update<br />
69
- beach buggy racing windows 8.1 gameplay<br />
70
- beach buggy racing for windows 8.1 tips and tricks<br />
71
- beach buggy racing windows 8.1 vector unit<br />
72
- beach buggy racing for windows 8.1 online<br />
73
- beach buggy racing windows 8.1 multiplayer<br />
74
- beach buggy racing for windows 8.1 mod apk<br />
75
- beach buggy racing windows 8.1 hack<br />
76
- beach buggy racing for windows 8.1 full version<br />
77
- beach buggy racing windows 8.1 best car<br />
78
- beach buggy racing for windows 8.1 controller support<br />
79
- beach buggy racing windows 8.1 powerups<br />
80
- beach buggy racing for windows 8.1 characters<br />
81
- beach buggy racing windows 8.1 tracks<br />
82
- beach buggy racing for windows 8.1 modes<br />
83
- beach buggy racing windows 8.1 achievements<br />
84
- beach buggy racing for windows 8.1 bugs and glitches<br />
85
- beach buggy racing windows 8.1 ratings and feedbacks<br />
86
- beach buggy racing for windows 8.1 screenshots and videos</p>
87
- <h2>FAQs</h2>
88
- <p>Here are some of the frequently asked questions about Beach Buggy Racing for Windows 8.1:</p>
89
- <ul>
90
- <li>Q1: Is Beach Buggy Racing free to play?</li>
91
- <li>A1: Yes, Beach Buggy Racing is free to play. However, it contains in-app purchases that allow you to buy more coins, gems, or powerups. You can also watch ads to earn free coins or gems.</li>
92
- <li>Q2: Can I play Beach Buggy Racing online with other players?</li>
93
- <li>A2: Yes, you can play Beach Buggy Racing online with other players. You can join or create a multiplayer lobby and invite up to four friends to race with you. You can also compete with other players in the global leaderboards.</li>
94
- <li>Q3: How can I update Beach Buggy Racing on Windows 8.1?</li>
95
- <li>A3: You can update Beach Buggy Racing on Windows 8.1 by following these steps:</li>
96
- <ol>
97
- <li>Open the Google Play Store app or the Microsoft Store app on your device.</li>
98
- <li>Search for Beach Buggy Racing and tap on it.</li>
99
- <li>If there is an update available, tap on the Update button and wait for the game to update.</li>
100
- </ol>
101
- <li>Q4: What are the best powerups to use in Beach Buggy Racing?</li>
102
- <li>A4: The best powerups to use in Beach Buggy Racing depend on your play style and strategy. However, some of the most useful powerups are:</li>
103
- <ul>
104
- <li>Fireball: This powerup allows you to shoot a fireball that can hit multiple opponents or obstacles.</li>
105
- <li>Electro Blast: This powerup allows you to unleash a shockwave that can stun nearby opponents or destroy obstacles.</li>
106
- <li>Tiki Seeker: This powerup allows you to summon a homing missile that can chase and hit a random opponent.</li>
107
- </ul>
108
- <li>Q5: How can I contact the developers of Beach Buggy Racing?</li>
109
- <li>A5: You can contact the developers of Beach Buggy Racing by visiting their website at https://www.vectorunit.com/beach-buggy-racing or by sending them an email at [email protected].</li>
110
- </ul></p> 197e85843d<br />
111
- <br />
112
- <br />
spaces/1toTree/lora_test/ppdiffusers/utils/__init__.py DELETED
@@ -1,100 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # flake8: noqa
-
- import os
-
- from packaging import version
-
- from ..version import VERSION as __version__
- from .deprecation_utils import deprecate
- from .import_utils import (
-     ENV_VARS_TRUE_AND_AUTO_VALUES,
-     ENV_VARS_TRUE_VALUES,
-     USE_PADDLE,
-     DummyObject,
-     OptionalDependencyNotAvailable,
-     is_fastdeploy_available,
-     is_inflect_available,
-     is_k_diffusion_available,
-     is_librosa_available,
-     is_modelcards_available,
-     is_onnx_available,
-     is_paddle_available,
-     is_paddle_version,
-     is_paddlenlp_available,
-     is_scipy_available,
-     is_unidecode_available,
-     is_wandb_available,
-     requires_backends,
- )
- from .logging import get_logger
- from .outputs import BaseOutput
- from .pil_utils import PIL_INTERPOLATION
-
- if is_paddle_available():
-     from .testing_utils import (
-         floats_tensor,
-         image_grid,
-         load_hf_numpy,
-         load_image,
-         load_numpy,
-         load_ppnlp_numpy,
-         nightly,
-         paddle_all_close,
-         parse_flag_from_env,
-         slow,
-     )
-
- logger = get_logger(__name__)
-
- from paddlenlp.utils.env import _get_ppnlp_home, _get_sub_home
-
- ppnlp_cache_home = _get_ppnlp_home()
- default_cache_path = _get_sub_home("models")
-
- CONFIG_NAME = "config.json"
- WEIGHTS_NAME = "model_state.pdparams"
- FASTDEPLOY_WEIGHTS_NAME = "inference.pdiparams"
- FASTDEPLOY_MODEL_NAME = "inference.pdmodel"
- DOWNLOAD_SERVER = "https://bj.bcebos.com/paddlenlp/models/community"
- PPDIFFUSERS_CACHE = default_cache_path
- PPDIFFUSERS_DYNAMIC_MODULE_NAME = "ppdiffusers_modules"
- PPNLP_MODULES_CACHE = os.getenv("PPNLP_MODULES_CACHE", _get_sub_home("modules"))
- HF_CACHE = os.environ.get("HUGGINGFACE_HUB_CACHE", PPDIFFUSERS_CACHE)
- TEST_DOWNLOAD_SERVER = "https://paddlenlp.bj.bcebos.com/models/community/ppdiffusers/tests"
- HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
-
- _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS = [
-     "DDIMScheduler",
-     "DDPMScheduler",
-     "PNDMScheduler",
-     "LMSDiscreteScheduler",
-     "EulerDiscreteScheduler",
-     "HeunDiscreteScheduler",
-     "EulerAncestralDiscreteScheduler",
-     "DPMSolverMultistepScheduler",
-     "DPMSolverSinglestepScheduler",
- ]
-
-
- def check_min_version(min_version):
-     if version.parse(__version__) < version.parse(min_version):
-         if "dev" in min_version:
-             error_message = "This example requires a source install from ppdiffusers"
-         else:
-             error_message = f"This example requires a minimum version of {min_version},"
-         error_message += f" but the version found is {__version__}.\n"
-         raise ImportError(error_message)
 
spaces/A00001/bingothoo/src/components/chat-suggestions.tsx DELETED
@@ -1,45 +0,0 @@
1
- import React, { useMemo } from 'react'
2
- import Image from 'next/image'
3
- import HelpIcon from '@/assets/images/help.svg'
4
- import { SuggestedResponse } from '@/lib/bots/bing/types'
5
- import { useBing } from '@/lib/hooks/use-bing'
6
- import { atom, useAtom } from 'jotai'
7
-
8
- type Suggestions = SuggestedResponse[]
9
- const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资讯', '必应如何使用 AI?'].map((text) => ({ text }))
10
- const suggestionsAtom = atom<Suggestions>([])
11
-
12
- type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
13
-
14
- export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
15
- const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
16
- const toggleSuggestions = (() => {
17
- if (currentSuggestions === helpSuggestions) {
18
- setSuggestions(suggestions)
19
- } else {
20
- setSuggestions(helpSuggestions)
21
- }
22
- })
23
-
24
- useMemo(() => {
25
- setSuggestions(suggestions)
26
- window.scrollBy(0, 2000)
27
- }, [suggestions.length])
28
-
29
- return currentSuggestions?.length ? (
30
- <div className="py-6">
31
- <div className="suggestion-items">
32
- <button className="rai-button" type="button" aria-label="这是什么?" onClick={toggleSuggestions}>
33
- <Image alt="help" src={HelpIcon} width={24} />
34
- </button>
35
- {
36
- currentSuggestions.map(suggestion => (
37
- <button key={suggestion.text} className="body-1-strong suggestion-container" type="button" onClick={() => setInput(suggestion.text)}>
38
- {suggestion.text}
39
- </button>
40
- ))
41
- }
42
- </div>
43
- </div>
44
- ) : null
45
- }
 
spaces/AIConsultant/MusicGen/Makefile DELETED
@@ -1,40 +0,0 @@
1
- INTEG=AUDIOCRAFT_DORA_DIR="/tmp/magma_$(USER)" python3 -m dora -v run --clear device=cpu dataset.num_workers=0 optim.epochs=1 \
2
- dataset.train.num_samples=10 dataset.valid.num_samples=10 \
3
- dataset.evaluate.num_samples=10 dataset.generate.num_samples=2 sample_rate=16000 \
4
- logging.level=DEBUG
5
- INTEG_COMPRESSION = $(INTEG) solver=compression/debug rvq.n_q=2 rvq.bins=48 checkpoint.save_last=true # SIG is 616d7b3c
6
- INTEG_MUSICGEN = $(INTEG) solver=musicgen/debug dset=audio/example compression_model_checkpoint=//sig/5091833e \
7
- transformer_lm.n_q=2 transformer_lm.card=48 transformer_lm.dim=16 checkpoint.save_last=false # Using compression model from 616d7b3c
8
- INTEG_AUDIOGEN = $(INTEG) solver=audiogen/debug dset=audio/example compression_model_checkpoint=//sig/5091833e \
9
- transformer_lm.n_q=2 transformer_lm.card=48 transformer_lm.dim=16 checkpoint.save_last=false # Using compression model from 616d7b3c
10
- INTEG_MBD = $(INTEG) solver=diffusion/debug dset=audio/example \
11
- checkpoint.save_last=false # Using compression model from 616d7b3c
12
-
13
- default: linter tests
14
-
15
- install:
16
- pip install -U pip
17
- pip install -U -e '.[dev]'
18
-
19
- linter:
20
- flake8 audiocraft && mypy audiocraft
21
- flake8 tests && mypy tests
22
-
23
- tests:
24
- coverage run -m pytest tests
25
- coverage report
26
-
27
- tests_integ:
28
- $(INTEG_COMPRESSION)
29
- $(INTEG_MBD)
30
- $(INTEG_MUSICGEN)
31
- $(INTEG_AUDIOGEN)
32
-
33
-
34
- api_docs:
35
- pdoc3 --html -o api_docs -f audiocraft
36
-
37
- dist:
38
- python setup.py sdist
39
-
40
- .PHONY: linter tests api_docs dist
 
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/diacritizer.py DELETED
@@ -1,98 +0,0 @@
1
- from typing import Dict
2
- import torch
3
- from .config_manager import ConfigManager
4
-
5
-
6
- class Diacritizer:
7
- def __init__(
8
- self, config_path: str, model_kind: str, load_model: bool = False
9
- ) -> None:
10
- self.config_path = config_path
11
- self.model_kind = model_kind
12
- self.config_manager = ConfigManager(
13
- config_path=config_path, model_kind=model_kind
14
- )
15
- self.config = self.config_manager.config
16
- self.text_encoder = self.config_manager.text_encoder
17
- if self.config.get("device"):
18
- self.device = self.config["device"]
19
- else:
20
- self.device = "cuda" if torch.cuda.is_available() else "cpu"
21
-
22
- if load_model:
23
- self.model, self.global_step = self.config_manager.load_model()
24
- self.model = self.model.to(self.device)
25
-
26
- self.start_symbol_id = self.text_encoder.start_symbol_id
27
-
28
- def set_model(self, model: torch.nn.Module):
29
- self.model = model
30
-
31
- def diacritize_text(self, text: str):
32
- seq = self.text_encoder.input_to_sequence(text)
33
- return self.diacritize_batch(torch.LongTensor([seq]).to(self.device))
34
-
35
- def diacritize_batch(self, batch):
36
- raise NotImplementedError()
37
-
38
- def diacritize_iterators(self, iterator):
39
- pass
40
-
41
-
42
- class CBHGDiacritizer(Diacritizer):
43
- def diacritize_batch(self, batch):
44
- self.model.eval()
45
- inputs = batch["src"]
46
- lengths = batch["lengths"]
47
- outputs = self.model(inputs.to(self.device), lengths.to("cpu"))
48
- diacritics = outputs["diacritics"]
49
- predictions = torch.max(diacritics, 2).indices
50
- sentences = []
51
-
52
- for src, prediction in zip(inputs, predictions):
53
- sentence = self.text_encoder.combine_text_and_haraqat(
54
- list(src.detach().cpu().numpy()),
55
- list(prediction.detach().cpu().numpy()),
56
- )
57
- sentences.append(sentence)
58
-
59
- return sentences
60
-
61
-
62
- class Seq2SeqDiacritizer(Diacritizer):
63
- def diacritize_batch(self, batch):
64
- self.model.eval()
65
- inputs = batch["src"]
66
- lengths = batch["lengths"]
67
- outputs = self.model(inputs.to(self.device), lengths.to("cpu"))
68
- diacritics = outputs["diacritics"]
69
- predictions = torch.max(diacritics, 2).indices
70
- sentences = []
71
-
72
- for src, prediction in zip(inputs, predictions):
73
- sentence = self.text_encoder.combine_text_and_haraqat(
74
- list(src.detach().cpu().numpy()),
75
- list(prediction.detach().cpu().numpy()),
76
- )
77
- sentences.append(sentence)
78
-
79
- return sentences
80
-
81
- class GPTDiacritizer(Diacritizer):
82
- def diacritize_batch(self, batch):
83
- self.model.eval()
84
- inputs = batch["src"]
85
- lengths = batch["lengths"]
86
- outputs = self.model(inputs.to(self.device), lengths.to("cpu"))
87
- diacritics = outputs["diacritics"]
88
- predictions = torch.max(diacritics, 2).indices
89
- sentences = []
90
-
91
- for src, prediction in zip(inputs, predictions):
92
- sentence = self.text_encoder.combine_text_and_haraqat(
93
- list(src.detach().cpu().numpy()),
94
- list(prediction.detach().cpu().numpy()),
95
- )
96
- sentences.append(sentence)
97
-
98
- return sentences
 
spaces/Abhi1262/MyGenAIChatBot/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """Meet Abhi, your youthful and witty personal assistant! At 20 years old, he's full of energy and always eager to help. Abhi's goal is to assist you with any questions or problems you might have. His enthusiasm shines through in every response, making interactions with him enjoyable and engaging.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/Model.ts DELETED
@@ -1,16 +0,0 @@
1
- import type { BackendModel } from "$lib/server/models";
2
-
3
- export type Model = Pick<
4
- BackendModel,
5
- | "id"
6
- | "name"
7
- | "displayName"
8
- | "websiteUrl"
9
- | "datasetName"
10
- | "promptExamples"
11
- | "parameters"
12
- | "description"
13
- | "modelUrl"
14
- | "datasetUrl"
15
- | "preprompt"
16
- >;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/ScrollablePanel.d.ts DELETED
@@ -1,40 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Scrollable from '../utils/scrollable/Scrollable';
3
-
4
-
5
- export default ScrollablePanel;
6
-
7
- declare namespace ScrollablePanel {
8
-
9
- interface IConfig extends Scrollable.IConfig {
10
- space?: {
11
- left?: number, right?: number, top?: number, bottom?: number,
12
-
13
- panel?: number | {
14
- left?: number, right?: number, top?: number, bottom?: number,
15
- },
16
-
17
- header?: number,
18
- footer?: number,
19
- },
20
-
21
- panel: {
22
- child: Phaser.GameObjects.GameObject,
23
- mask?: (
24
- {
25
- padding?: number,
26
- updateMode?: 0 | 1 | 'update' | 'everyTick'
27
- } |
28
- boolean
29
- ),
30
- },
31
- }
32
- }
33
-
34
- declare class ScrollablePanel extends Scrollable {
35
- constructor(
36
- scene: Phaser.Scene,
37
- config?: ScrollablePanel.IConfig
38
- );
39
-
40
- }
 
spaces/Amrrs/numerizerlit/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: Numerizerlit
3
- emoji: 📚
4
- colorFrom: blue
5
- colorTo: gray
6
- sdk: streamlit
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio` or `streamlit`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/loading_overview.md DELETED
@@ -1,17 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Overview
14
-
15
- 🧨 Diffusers offers many pipelines, models, and schedulers for generative tasks. To make loading these components as simple as possible, we provide a single and unified method - `from_pretrained()` - that loads any of these components from either the Hugging Face [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) or your local machine. Whenever you load a pipeline or model, the latest files are automatically downloaded and cached so you can quickly reuse them next time without redownloading the files.
16
-
17
- This section will show you everything you need to know about loading pipelines, how to load different components in a pipeline, how to load checkpoint variants, and how to load community pipelines. You'll also learn how to load schedulers and compare the speed and quality trade-offs of using different schedulers. Finally, you'll see how to convert and load KerasCV checkpoints so you can use them in PyTorch with 🧨 Diffusers.
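As a hedged illustration of the unified `from_pretrained()` loading described above — the checkpoint ID, prompt, and GPU move are assumptions made for the example, not part of this page:

```python
# Minimal sketch: one call loads every component of a pipeline from the Hub (or a local path).
# "runwayml/stable-diffusion-v1-5" is an illustrative checkpoint ID, not prescribed by this page.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("cuda")  # optional; assumes a CUDA device is available

# Subsequent calls reuse the locally cached files instead of re-downloading them.
image = pipe("a photo of an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")
```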
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_deis_multistep.py DELETED
@@ -1,568 +0,0 @@
1
- # Copyright 2023 FLAIR Lab and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: check https://arxiv.org/abs/2204.13902 and https://github.com/qsh-zh/deis for more info
16
- # The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
17
-
18
- import math
19
- from typing import List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import torch
23
-
24
- from ..configuration_utils import ConfigMixin, register_to_config
25
- from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
26
-
27
-
28
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
29
- def betas_for_alpha_bar(
30
- num_diffusion_timesteps,
31
- max_beta=0.999,
32
- alpha_transform_type="cosine",
33
- ):
34
- """
35
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
36
- (1-beta) over time from t = [0,1].
37
-
38
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
39
- to that part of the diffusion process.
40
-
41
-
42
- Args:
43
- num_diffusion_timesteps (`int`): the number of betas to produce.
44
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
45
- prevent singularities.
46
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
47
- Choose from `cosine` or `exp`
48
-
49
- Returns:
50
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
51
- """
52
- if alpha_transform_type == "cosine":
53
-
54
- def alpha_bar_fn(t):
55
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
56
-
57
- elif alpha_transform_type == "exp":
58
-
59
- def alpha_bar_fn(t):
60
- return math.exp(t * -12.0)
61
-
62
- else:
63
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
64
-
65
- betas = []
66
- for i in range(num_diffusion_timesteps):
67
- t1 = i / num_diffusion_timesteps
68
- t2 = (i + 1) / num_diffusion_timesteps
69
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
70
- return torch.tensor(betas, dtype=torch.float32)
71
-
72
-
73
- class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
74
- """
75
- DEIS (https://arxiv.org/abs/2204.13902) is a fast high order solver for diffusion ODEs. We slightly modify the
76
- polynomial fitting formula in log-rho space instead of the original linear t space in DEIS paper. The modification
77
- enjoys closed-form coefficients for exponential multistep update instead of relying on the numerical solver. More
78
- variants of DEIS can be found in https://github.com/qsh-zh/deis.
79
-
80
- Currently, we support the log-rho multistep DEIS. We recommend to use `solver_order=2 / 3` while `solver_order=1`
81
- reduces to DDIM.
82
-
83
- We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
84
- diffusion models, you can set `thresholding=True` to use the dynamic thresholding.
85
-
86
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
87
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
88
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
89
- [`~SchedulerMixin.from_pretrained`] functions.
90
-
91
- Args:
92
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
93
- beta_start (`float`): the starting `beta` value of inference.
94
- beta_end (`float`): the final `beta` value.
95
- beta_schedule (`str`):
96
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
97
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
98
- trained_betas (`np.ndarray`, optional):
99
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
100
- solver_order (`int`, default `2`):
101
- the order of DEIS; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided sampling, and
102
- `solver_order=3` for unconditional sampling.
103
- prediction_type (`str`, default `epsilon`):
104
- indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
105
- or `v-prediction`.
106
- thresholding (`bool`, default `False`):
107
- whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
108
- Note that the thresholding method is unsuitable for latent-space diffusion models (such as
109
- stable-diffusion).
110
- dynamic_thresholding_ratio (`float`, default `0.995`):
111
- the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
112
- (https://arxiv.org/abs/2205.11487).
113
- sample_max_value (`float`, default `1.0`):
114
- the threshold value for dynamic thresholding. Valid only when `thresholding=True`
115
- algorithm_type (`str`, default `deis`):
116
- the algorithm type for the solver. current we support multistep deis, we will add other variants of DEIS in
117
- the future
118
- lower_order_final (`bool`, default `True`):
119
- whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
120
- find this trick can stabilize the sampling of DEIS for steps < 15, especially for steps <= 10.
121
- use_karras_sigmas (`bool`, *optional*, defaults to `False`):
122
- This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
123
- noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
124
- of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
125
- timestep_spacing (`str`, default `"linspace"`):
126
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
127
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
128
- steps_offset (`int`, default `0`):
129
- an offset added to the inference steps. You can use a combination of `offset=1` and
130
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
131
- stable diffusion.
132
- """
133
-
134
- _compatibles = [e.name for e in KarrasDiffusionSchedulers]
135
- order = 1
136
-
137
- @register_to_config
138
- def __init__(
139
- self,
140
- num_train_timesteps: int = 1000,
141
- beta_start: float = 0.0001,
142
- beta_end: float = 0.02,
143
- beta_schedule: str = "linear",
144
- trained_betas: Optional[np.ndarray] = None,
145
- solver_order: int = 2,
146
- prediction_type: str = "epsilon",
147
- thresholding: bool = False,
148
- dynamic_thresholding_ratio: float = 0.995,
149
- sample_max_value: float = 1.0,
150
- algorithm_type: str = "deis",
151
- solver_type: str = "logrho",
152
- lower_order_final: bool = True,
153
- use_karras_sigmas: Optional[bool] = False,
154
- timestep_spacing: str = "linspace",
155
- steps_offset: int = 0,
156
- ):
157
- if trained_betas is not None:
158
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
159
- elif beta_schedule == "linear":
160
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
161
- elif beta_schedule == "scaled_linear":
162
- # this schedule is very specific to the latent diffusion model.
163
- self.betas = (
164
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
165
- )
166
- elif beta_schedule == "squaredcos_cap_v2":
167
- # Glide cosine schedule
168
- self.betas = betas_for_alpha_bar(num_train_timesteps)
169
- else:
170
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
171
-
172
- self.alphas = 1.0 - self.betas
173
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
174
- # Currently we only support VP-type noise schedule
175
- self.alpha_t = torch.sqrt(self.alphas_cumprod)
176
- self.sigma_t = torch.sqrt(1 - self.alphas_cumprod)
177
- self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t)
178
-
179
- # standard deviation of the initial noise distribution
180
- self.init_noise_sigma = 1.0
181
-
182
- # settings for DEIS
183
- if algorithm_type not in ["deis"]:
184
- if algorithm_type in ["dpmsolver", "dpmsolver++"]:
185
- self.register_to_config(algorithm_type="deis")
186
- else:
187
- raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}")
188
-
189
- if solver_type not in ["logrho"]:
190
- if solver_type in ["midpoint", "heun", "bh1", "bh2"]:
191
- self.register_to_config(solver_type="logrho")
192
- else:
193
- raise NotImplementedError(f"solver type {solver_type} is not implemented for {self.__class__}")
194
-
195
- # setable values
196
- self.num_inference_steps = None
197
- timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
198
- self.timesteps = torch.from_numpy(timesteps)
199
- self.model_outputs = [None] * solver_order
200
- self.lower_order_nums = 0
201
-
202
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
203
- """
204
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
205
-
206
- Args:
207
- num_inference_steps (`int`):
208
- the number of diffusion steps used when generating samples with a pre-trained model.
209
- device (`str` or `torch.device`, optional):
210
- the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
211
- """
212
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
213
- if self.config.timestep_spacing == "linspace":
214
- timesteps = (
215
- np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1)
216
- .round()[::-1][:-1]
217
- .copy()
218
- .astype(np.int64)
219
- )
220
- elif self.config.timestep_spacing == "leading":
221
- step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1)
222
- # creates integer timesteps by multiplying by ratio
223
- # casting to int to avoid issues when num_inference_step is power of 3
224
- timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64)
225
- timesteps += self.config.steps_offset
226
- elif self.config.timestep_spacing == "trailing":
227
- step_ratio = self.config.num_train_timesteps / num_inference_steps
228
- # creates integer timesteps by multiplying by ratio
229
- # casting to int to avoid issues when num_inference_step is power of 3
230
- timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64)
231
- timesteps -= 1
232
- else:
233
- raise ValueError(
234
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
235
- )
236
-
237
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
238
- if self.config.use_karras_sigmas:
239
- log_sigmas = np.log(sigmas)
240
- sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
241
- timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
242
- timesteps = np.flip(timesteps).copy().astype(np.int64)
243
-
244
- self.sigmas = torch.from_numpy(sigmas)
245
-
246
- # when num_inference_steps == num_train_timesteps, we can end up with
247
- # duplicates in timesteps.
248
- _, unique_indices = np.unique(timesteps, return_index=True)
249
- timesteps = timesteps[np.sort(unique_indices)]
250
-
251
- self.timesteps = torch.from_numpy(timesteps).to(device)
252
-
253
- self.num_inference_steps = len(timesteps)
254
-
255
- self.model_outputs = [
256
- None,
257
- ] * self.config.solver_order
258
- self.lower_order_nums = 0
259
-
260
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
261
- def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
262
- """
263
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
264
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
265
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
266
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
267
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
268
-
269
- https://arxiv.org/abs/2205.11487
270
- """
271
- dtype = sample.dtype
272
- batch_size, channels, height, width = sample.shape
273
-
274
- if dtype not in (torch.float32, torch.float64):
275
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
276
-
277
- # Flatten sample for doing quantile calculation along each image
278
- sample = sample.reshape(batch_size, channels * height * width)
279
-
280
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
281
-
282
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
283
- s = torch.clamp(
284
- s, min=1, max=self.config.sample_max_value
285
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
286
-
287
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
288
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
289
-
290
- sample = sample.reshape(batch_size, channels, height, width)
291
- sample = sample.to(dtype)
292
-
293
- return sample
294
-
295
- def convert_model_output(
296
- self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor
297
- ) -> torch.FloatTensor:
298
- """
299
- Convert the model output to the corresponding type that the algorithm DEIS needs.
300
-
301
- Args:
302
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
303
- timestep (`int`): current discrete timestep in the diffusion chain.
304
- sample (`torch.FloatTensor`):
305
- current instance of sample being created by diffusion process.
306
-
307
- Returns:
308
- `torch.FloatTensor`: the converted model output.
309
- """
310
- if self.config.prediction_type == "epsilon":
311
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
312
- x0_pred = (sample - sigma_t * model_output) / alpha_t
313
- elif self.config.prediction_type == "sample":
314
- x0_pred = model_output
315
- elif self.config.prediction_type == "v_prediction":
316
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
317
- x0_pred = alpha_t * sample - sigma_t * model_output
318
- else:
319
- raise ValueError(
320
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
321
- " `v_prediction` for the DEISMultistepScheduler."
322
- )
323
-
324
- if self.config.thresholding:
325
- x0_pred = self._threshold_sample(x0_pred)
326
-
327
- if self.config.algorithm_type == "deis":
328
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
329
- return (sample - alpha_t * x0_pred) / sigma_t
330
- else:
331
- raise NotImplementedError("only support log-rho multistep deis now")
332
-
333
- def deis_first_order_update(
334
- self,
335
- model_output: torch.FloatTensor,
336
- timestep: int,
337
- prev_timestep: int,
338
- sample: torch.FloatTensor,
339
- ) -> torch.FloatTensor:
340
- """
341
- One step for the first-order DEIS (equivalent to DDIM).
342
-
343
- Args:
344
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
345
- timestep (`int`): current discrete timestep in the diffusion chain.
346
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
347
- sample (`torch.FloatTensor`):
348
- current instance of sample being created by diffusion process.
349
-
350
- Returns:
351
- `torch.FloatTensor`: the sample tensor at the previous timestep.
352
- """
353
- lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
354
- alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
355
- sigma_t, _ = self.sigma_t[prev_timestep], self.sigma_t[timestep]
356
- h = lambda_t - lambda_s
357
- if self.config.algorithm_type == "deis":
358
- x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output
359
- else:
360
- raise NotImplementedError("only support log-rho multistep deis now")
361
- return x_t
362
-
363
- def multistep_deis_second_order_update(
364
- self,
365
- model_output_list: List[torch.FloatTensor],
366
- timestep_list: List[int],
367
- prev_timestep: int,
368
- sample: torch.FloatTensor,
369
- ) -> torch.FloatTensor:
370
- """
371
- One step for the second-order multistep DEIS.
372
-
373
- Args:
374
- model_output_list (`List[torch.FloatTensor]`):
375
- direct outputs from learned diffusion model at current and latter timesteps.
376
- timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
377
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
378
- sample (`torch.FloatTensor`):
379
- current instance of sample being created by diffusion process.
380
-
381
- Returns:
382
- `torch.FloatTensor`: the sample tensor at the previous timestep.
383
- """
384
- t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
385
- m0, m1 = model_output_list[-1], model_output_list[-2]
386
- alpha_t, alpha_s0, alpha_s1 = self.alpha_t[t], self.alpha_t[s0], self.alpha_t[s1]
387
- sigma_t, sigma_s0, sigma_s1 = self.sigma_t[t], self.sigma_t[s0], self.sigma_t[s1]
388
-
389
- rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1
390
-
391
- if self.config.algorithm_type == "deis":
392
-
393
- def ind_fn(t, b, c):
394
- # Integrate[(log(t) - log(c)) / (log(b) - log(c)), {t}]
395
- return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c))
396
-
397
- coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1)
398
- coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0)
399
-
400
- x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1)
401
- return x_t
402
- else:
403
- raise NotImplementedError("only support log-rho multistep deis now")
404
-
405
- def multistep_deis_third_order_update(
406
- self,
407
- model_output_list: List[torch.FloatTensor],
408
- timestep_list: List[int],
409
- prev_timestep: int,
410
- sample: torch.FloatTensor,
411
- ) -> torch.FloatTensor:
412
- """
413
- One step for the third-order multistep DEIS.
414
-
415
- Args:
416
- model_output_list (`List[torch.FloatTensor]`):
417
- direct outputs from learned diffusion model at current and latter timesteps.
418
- timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
419
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
420
- sample (`torch.FloatTensor`):
421
- current instance of sample being created by diffusion process.
422
-
423
- Returns:
424
- `torch.FloatTensor`: the sample tensor at the previous timestep.
425
- """
426
- t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
427
- m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
428
- alpha_t, alpha_s0, alpha_s1, alpha_s2 = self.alpha_t[t], self.alpha_t[s0], self.alpha_t[s1], self.alpha_t[s2]
429
- sigma_t, sigma_s0, sigma_s1, sigma_s2 = self.sigma_t[t], self.sigma_t[s0], self.sigma_t[s1], self.sigma_t[s2]
430
- rho_t, rho_s0, rho_s1, rho_s2 = (
431
- sigma_t / alpha_t,
432
- sigma_s0 / alpha_s0,
433
- sigma_s1 / alpha_s1,
434
- sigma_s2 / alpha_s2,
435
- )
436
-
437
- if self.config.algorithm_type == "deis":
438
-
439
- def ind_fn(t, b, c, d):
440
- # Integrate[(log(t) - log(c))(log(t) - log(d)) / (log(b) - log(c))(log(b) - log(d)), {t}]
441
- numerator = t * (
442
- np.log(c) * (np.log(d) - np.log(t) + 1)
443
- - np.log(d) * np.log(t)
444
- + np.log(d)
445
- + np.log(t) ** 2
446
- - 2 * np.log(t)
447
- + 2
448
- )
449
- denominator = (np.log(b) - np.log(c)) * (np.log(b) - np.log(d))
450
- return numerator / denominator
451
-
452
- coef1 = ind_fn(rho_t, rho_s0, rho_s1, rho_s2) - ind_fn(rho_s0, rho_s0, rho_s1, rho_s2)
453
- coef2 = ind_fn(rho_t, rho_s1, rho_s2, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s2, rho_s0)
454
- coef3 = ind_fn(rho_t, rho_s2, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s2, rho_s0, rho_s1)
455
-
456
- x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1 + coef3 * m2)
457
-
458
- return x_t
459
- else:
460
- raise NotImplementedError("only support log-rho multistep deis now")
461
-
462
- def step(
463
- self,
464
- model_output: torch.FloatTensor,
465
- timestep: int,
466
- sample: torch.FloatTensor,
467
- return_dict: bool = True,
468
- ) -> Union[SchedulerOutput, Tuple]:
469
- """
470
- Step function propagating the sample with the multistep DEIS.
471
-
472
- Args:
473
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
474
- timestep (`int`): current discrete timestep in the diffusion chain.
475
- sample (`torch.FloatTensor`):
476
- current instance of sample being created by diffusion process.
477
- return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
478
-
479
- Returns:
480
- [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
481
- True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
482
-
483
- """
484
- if self.num_inference_steps is None:
485
- raise ValueError(
486
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
487
- )
488
-
489
- if isinstance(timestep, torch.Tensor):
490
- timestep = timestep.to(self.timesteps.device)
491
- step_index = (self.timesteps == timestep).nonzero()
492
- if len(step_index) == 0:
493
- step_index = len(self.timesteps) - 1
494
- else:
495
- step_index = step_index.item()
496
- prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
497
- lower_order_final = (
498
- (step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15
499
- )
500
- lower_order_second = (
501
- (step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
502
- )
503
-
504
- model_output = self.convert_model_output(model_output, timestep, sample)
505
- for i in range(self.config.solver_order - 1):
506
- self.model_outputs[i] = self.model_outputs[i + 1]
507
- self.model_outputs[-1] = model_output
508
-
509
- if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
510
- prev_sample = self.deis_first_order_update(model_output, timestep, prev_timestep, sample)
511
- elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
512
- timestep_list = [self.timesteps[step_index - 1], timestep]
513
- prev_sample = self.multistep_deis_second_order_update(
514
- self.model_outputs, timestep_list, prev_timestep, sample
515
- )
516
- else:
517
- timestep_list = [self.timesteps[step_index - 2], self.timesteps[step_index - 1], timestep]
518
- prev_sample = self.multistep_deis_third_order_update(
519
- self.model_outputs, timestep_list, prev_timestep, sample
520
- )
521
-
522
- if self.lower_order_nums < self.config.solver_order:
523
- self.lower_order_nums += 1
524
-
525
- if not return_dict:
526
- return (prev_sample,)
527
-
528
- return SchedulerOutput(prev_sample=prev_sample)
529
-
530
- def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
531
- """
532
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
533
- current timestep.
534
-
535
- Args:
536
- sample (`torch.FloatTensor`): input sample
537
-
538
- Returns:
539
- `torch.FloatTensor`: scaled input sample
540
- """
541
- return sample
542
-
543
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
544
- def add_noise(
545
- self,
546
- original_samples: torch.FloatTensor,
547
- noise: torch.FloatTensor,
548
- timesteps: torch.IntTensor,
549
- ) -> torch.FloatTensor:
550
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
551
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
552
- timesteps = timesteps.to(original_samples.device)
553
-
554
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
555
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
556
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
557
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
558
-
559
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
560
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
561
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
562
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
563
-
564
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
565
- return noisy_samples
566
-
567
- def __len__(self):
568
- return self.config.num_train_timesteps
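As a minimal sketch of how the multistep DEIS scheduler defined above is typically used — the pipeline checkpoint and prompt are illustrative assumptions, and `solver_order=2` simply follows the recommendation in the class docstring:

```python
# Sketch: swap DEISMultistepScheduler into an existing diffusers pipeline.
# The checkpoint ID below is an example only; any compatible Stable Diffusion checkpoint
# works the same way.
import torch
from diffusers import DiffusionPipeline, DEISMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# solver_order=2 is the setting recommended above for guided sampling.
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config, solver_order=2)
pipe = pipe.to("cuda")  # assumes a CUDA device

image = pipe("a lighthouse at sunset", num_inference_steps=20).images[0]
image.save("lighthouse.png")
```

Because DEIS is a fast multistep solver, a relatively small number of inference steps (here 20) is usually sufficient in practice.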
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py DELETED
@@ -1,264 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import random
18
- import unittest
19
-
20
- import numpy as np
21
- import torch
22
- from PIL import Image
23
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
24
-
25
- from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
26
- from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
27
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
28
-
29
- from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
30
- from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
31
-
32
-
33
- enable_full_determinism()
34
-
35
-
36
- class StableDiffusion2InpaintPipelineFastTests(
37
- PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
38
- ):
39
- pipeline_class = StableDiffusionInpaintPipeline
40
- params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
41
- batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
42
- image_params = frozenset(
43
- []
44
- ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
45
- image_latents_params = frozenset([])
46
-
47
- def get_dummy_components(self):
48
- torch.manual_seed(0)
49
- unet = UNet2DConditionModel(
50
- block_out_channels=(32, 64),
51
- layers_per_block=2,
52
- sample_size=32,
53
- in_channels=9,
54
- out_channels=4,
55
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
56
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
57
- cross_attention_dim=32,
58
- # SD2-specific config below
59
- attention_head_dim=(2, 4),
60
- use_linear_projection=True,
61
- )
62
- scheduler = PNDMScheduler(skip_prk_steps=True)
63
- torch.manual_seed(0)
64
- vae = AutoencoderKL(
65
- block_out_channels=[32, 64],
66
- in_channels=3,
67
- out_channels=3,
68
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
69
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
70
- latent_channels=4,
71
- sample_size=128,
72
- )
73
- torch.manual_seed(0)
74
- text_encoder_config = CLIPTextConfig(
75
- bos_token_id=0,
76
- eos_token_id=2,
77
- hidden_size=32,
78
- intermediate_size=37,
79
- layer_norm_eps=1e-05,
80
- num_attention_heads=4,
81
- num_hidden_layers=5,
82
- pad_token_id=1,
83
- vocab_size=1000,
84
- # SD2-specific config below
85
- hidden_act="gelu",
86
- projection_dim=512,
87
- )
88
- text_encoder = CLIPTextModel(text_encoder_config)
89
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
90
-
91
- components = {
92
- "unet": unet,
93
- "scheduler": scheduler,
94
- "vae": vae,
95
- "text_encoder": text_encoder,
96
- "tokenizer": tokenizer,
97
- "safety_checker": None,
98
- "feature_extractor": None,
99
- }
100
- return components
101
-
102
- def get_dummy_inputs(self, device, seed=0):
103
- # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
104
- image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
105
- image = image.cpu().permute(0, 2, 3, 1)[0]
106
- init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
107
- mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
108
- if str(device).startswith("mps"):
109
- generator = torch.manual_seed(seed)
110
- else:
111
- generator = torch.Generator(device=device).manual_seed(seed)
112
- inputs = {
113
- "prompt": "A painting of a squirrel eating a burger",
114
- "image": init_image,
115
- "mask_image": mask_image,
116
- "generator": generator,
117
- "num_inference_steps": 2,
118
- "guidance_scale": 6.0,
119
- "output_type": "numpy",
120
- }
121
- return inputs
122
-
123
- def test_stable_diffusion_inpaint(self):
124
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
125
- components = self.get_dummy_components()
126
- sd_pipe = StableDiffusionInpaintPipeline(**components)
127
- sd_pipe = sd_pipe.to(device)
128
- sd_pipe.set_progress_bar_config(disable=None)
129
-
130
- inputs = self.get_dummy_inputs(device)
131
- image = sd_pipe(**inputs).images
132
- image_slice = image[0, -3:, -3:, -1]
133
-
134
- assert image.shape == (1, 64, 64, 3)
135
- expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
136
-
137
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
138
-
139
- def test_inference_batch_single_identical(self):
140
- super().test_inference_batch_single_identical(expected_max_diff=3e-3)
141
-
142
-
143
- @slow
144
- @require_torch_gpu
145
- class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
146
- def tearDown(self):
147
- # clean up the VRAM after each test
148
- super().tearDown()
149
- gc.collect()
150
- torch.cuda.empty_cache()
151
-
152
- def test_stable_diffusion_inpaint_pipeline(self):
153
- init_image = load_image(
154
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
155
- "/sd2-inpaint/init_image.png"
156
- )
157
- mask_image = load_image(
158
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
159
- )
160
- expected_image = load_numpy(
161
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
162
- "/yellow_cat_sitting_on_a_park_bench.npy"
163
- )
164
-
165
- model_id = "stabilityai/stable-diffusion-2-inpainting"
166
- pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
167
- pipe.to(torch_device)
168
- pipe.set_progress_bar_config(disable=None)
169
- pipe.enable_attention_slicing()
170
-
171
- prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
172
-
173
- generator = torch.manual_seed(0)
174
- output = pipe(
175
- prompt=prompt,
176
- image=init_image,
177
- mask_image=mask_image,
178
- generator=generator,
179
- output_type="np",
180
- )
181
- image = output.images[0]
182
-
183
- assert image.shape == (512, 512, 3)
184
- assert np.abs(expected_image - image).max() < 9e-3
185
-
186
- def test_stable_diffusion_inpaint_pipeline_fp16(self):
187
- init_image = load_image(
188
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
189
- "/sd2-inpaint/init_image.png"
190
- )
191
- mask_image = load_image(
192
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
193
- )
194
- expected_image = load_numpy(
195
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
196
- "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
197
- )
198
-
199
- model_id = "stabilityai/stable-diffusion-2-inpainting"
200
- pipe = StableDiffusionInpaintPipeline.from_pretrained(
201
- model_id,
202
- torch_dtype=torch.float16,
203
- safety_checker=None,
204
- )
205
- pipe.to(torch_device)
206
- pipe.set_progress_bar_config(disable=None)
207
- pipe.enable_attention_slicing()
208
-
209
- prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
210
-
211
- generator = torch.manual_seed(0)
212
- output = pipe(
213
- prompt=prompt,
214
- image=init_image,
215
- mask_image=mask_image,
216
- generator=generator,
217
- output_type="np",
218
- )
219
- image = output.images[0]
220
-
221
- assert image.shape == (512, 512, 3)
222
- assert np.abs(expected_image - image).max() < 5e-1
223
-
224
- def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
225
- torch.cuda.empty_cache()
226
- torch.cuda.reset_max_memory_allocated()
227
- torch.cuda.reset_peak_memory_stats()
228
-
229
- init_image = load_image(
230
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
231
- "/sd2-inpaint/init_image.png"
232
- )
233
- mask_image = load_image(
234
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
235
- )
236
-
237
- model_id = "stabilityai/stable-diffusion-2-inpainting"
238
- pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
239
- pipe = StableDiffusionInpaintPipeline.from_pretrained(
240
- model_id,
241
- safety_checker=None,
242
- scheduler=pndm,
243
- torch_dtype=torch.float16,
244
- )
245
- pipe.to(torch_device)
246
- pipe.set_progress_bar_config(disable=None)
247
- pipe.enable_attention_slicing(1)
248
- pipe.enable_sequential_cpu_offload()
249
-
250
- prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
251
-
252
- generator = torch.manual_seed(0)
253
- _ = pipe(
254
- prompt=prompt,
255
- image=init_image,
256
- mask_image=mask_image,
257
- generator=generator,
258
- num_inference_steps=2,
259
- output_type="np",
260
- )
261
-
262
- mem_bytes = torch.cuda.max_memory_allocated()
263
- # make sure that less than 2.65 GB is allocated
264
- assert mem_bytes < 2.65 * 10**9
 
spaces/Andy1621/uniformer_image_detection/configs/gn/README.md DELETED
@@ -1,31 +0,0 @@
1
- # Group Normalization
2
-
3
- ## Introduction
4
-
5
- [ALGORITHM]
6
-
7
- ```latex
8
- @inproceedings{wu2018group,
9
- title={Group Normalization},
10
- author={Wu, Yuxin and He, Kaiming},
11
- booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
12
- year={2018}
13
- }
14
- ```
15
-
16
- ## Results and Models
17
-
18
- | Backbone | model | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
19
- |:-------------:|:----------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
20
- | R-50-FPN (d) | Mask R-CNN | 2x | 7.1 | 11.0 | 40.2 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206_050355.log.json) |
21
- | R-50-FPN (d) | Mask R-CNN | 3x | 7.1 | - | 40.5 | 36.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214_063512.log.json) |
22
- | R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 9.0 | 41.9 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205_234402.log.json) |
23
- | R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609.log.json) |
24
- | R-50-FPN (c) | Mask R-CNN | 2x | 7.1 | 10.9 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207_225832.log.json) |
25
- | R-50-FPN (c) | Mask R-CNN | 3x | 7.1 | - | 40.1 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225_235135.log.json) |
26
-
27
- **Notes:**
28
-
29
- (d) means the pretrained model was converted from Detectron, and (c) means the contributed model was pretrained by [@thangvubk](https://github.com/thangvubk).
30
- The `3x` schedule uses epochs [28, 34, 36].
31
- **Memory and train/inference time figures are outdated.**
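For readers comparing these entries with the plain Mask R-CNN baselines: GN is enabled through the `norm_cfg` fields of the backbone, neck, and heads. The snippet below is a simplified sketch of that pattern following the usual mmdetection convention; it is not a verbatim copy of the linked configs, and the exact heads and extra fields there may differ.

```python
# Simplified sketch of a "GN-all" override on top of a Mask R-CNN base config.
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(norm_cfg=norm_cfg),   # GN replaces BN in the ResNet backbone
    neck=dict(norm_cfg=norm_cfg),       # GN in the FPN
    roi_head=dict(
        bbox_head=dict(norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
```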
 
spaces/Andy1621/uniformer_image_detection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
2
- model = dict(
3
- pretrained='open-mmlab://resnext101_64x4d',
4
- backbone=dict(
5
- type='ResNeXt',
6
- depth=101,
7
- groups=64,
8
- base_width=4,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- frozen_stages=1,
12
- norm_cfg=dict(type='BN', requires_grad=True),
13
- style='pytorch'))
 
spaces/Andy1621/uniformer_image_detection/mmdet/__init__.py DELETED
@@ -1,28 +0,0 @@
1
- import mmcv
2
-
3
- from .version import __version__, short_version
4
-
5
-
6
- def digit_version(version_str):
7
- digit_version = []
8
- for x in version_str.split('.'):
9
- if x.isdigit():
10
- digit_version.append(int(x))
11
- elif x.find('rc') != -1:
12
- patch_version = x.split('rc')
13
- digit_version.append(int(patch_version[0]) - 1)
14
- digit_version.append(int(patch_version[1]))
15
- return digit_version
16
-
17
-
18
- mmcv_minimum_version = '1.2.4'
19
- mmcv_maximum_version = '1.4.0'
20
- mmcv_version = digit_version(mmcv.__version__)
21
-
22
-
23
- assert (mmcv_version >= digit_version(mmcv_minimum_version)
24
- and mmcv_version <= digit_version(mmcv_maximum_version)), \
25
- f'MMCV=={mmcv.__version__} is used but incompatible. ' \
26
- f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
27
-
28
- __all__ = ['__version__', 'short_version']
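As a quick illustration of the version parsing above (a standalone sketch, worked out by hand from `digit_version` as defined in this module): an `rc` component is encoded so that release candidates sort before the corresponding final release.

```python
# Illustrative only -- assumes digit_version() from the module above.
assert digit_version('1.2.4') == [1, 2, 4]
assert digit_version('1.3.0rc1') == [1, 3, -1, 1]   # '0rc1' -> [0 - 1, 1]
assert digit_version('1.3.0rc1') < digit_version('1.3.0')
```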
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/assign_result.py DELETED
@@ -1,204 +0,0 @@
1
- import torch
2
-
3
- from mmdet.utils import util_mixins
4
-
5
-
6
- class AssignResult(util_mixins.NiceRepr):
7
- """Stores assignments between predicted and truth boxes.
8
-
9
- Attributes:
10
- num_gts (int): the number of truth boxes considered when computing this
11
- assignment
12
-
13
- gt_inds (LongTensor): for each predicted box indicates the 1-based
14
- index of the assigned truth box. 0 means unassigned and -1 means
15
- ignore.
16
-
17
- max_overlaps (FloatTensor): the iou between the predicted box and its
18
- assigned truth box.
19
-
20
- labels (None | LongTensor): If specified, for each predicted box
21
- indicates the category label of the assigned truth box.
22
-
23
- Example:
24
- >>> # An assign result between 4 predicted boxes and 9 true boxes
25
- >>> # where only two boxes were assigned.
26
- >>> num_gts = 9
27
- >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
28
- >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
29
- >>> labels = torch.LongTensor([0, 3, 4, 0])
30
- >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
31
- >>> print(str(self)) # xdoctest: +IGNORE_WANT
32
- <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
33
- labels.shape=(4,))>
34
- >>> # Force addition of gt labels (when adding gt as proposals)
35
- >>> new_labels = torch.LongTensor([3, 4, 5])
36
- >>> self.add_gt_(new_labels)
37
- >>> print(str(self)) # xdoctest: +IGNORE_WANT
38
- <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
39
- labels.shape=(7,))>
40
- """
41
-
42
- def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
43
- self.num_gts = num_gts
44
- self.gt_inds = gt_inds
45
- self.max_overlaps = max_overlaps
46
- self.labels = labels
47
- # Interface for possible user-defined properties
48
- self._extra_properties = {}
49
-
50
- @property
51
- def num_preds(self):
52
- """int: the number of predictions in this assignment"""
53
- return len(self.gt_inds)
54
-
55
- def set_extra_property(self, key, value):
56
- """Set user-defined new property."""
57
- assert key not in self.info
58
- self._extra_properties[key] = value
59
-
60
- def get_extra_property(self, key):
61
- """Get user-defined property."""
62
- return self._extra_properties.get(key, None)
63
-
64
- @property
65
- def info(self):
66
- """dict: a dictionary of info about the object"""
67
- basic_info = {
68
- 'num_gts': self.num_gts,
69
- 'num_preds': self.num_preds,
70
- 'gt_inds': self.gt_inds,
71
- 'max_overlaps': self.max_overlaps,
72
- 'labels': self.labels,
73
- }
74
- basic_info.update(self._extra_properties)
75
- return basic_info
76
-
77
- def __nice__(self):
78
- """str: a "nice" summary string describing this assign result"""
79
- parts = []
80
- parts.append(f'num_gts={self.num_gts!r}')
81
- if self.gt_inds is None:
82
- parts.append(f'gt_inds={self.gt_inds!r}')
83
- else:
84
- parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
85
- if self.max_overlaps is None:
86
- parts.append(f'max_overlaps={self.max_overlaps!r}')
87
- else:
88
- parts.append('max_overlaps.shape='
89
- f'{tuple(self.max_overlaps.shape)!r}')
90
- if self.labels is None:
91
- parts.append(f'labels={self.labels!r}')
92
- else:
93
- parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
94
- return ', '.join(parts)
95
-
96
- @classmethod
97
- def random(cls, **kwargs):
98
- """Create random AssignResult for tests or debugging.
99
-
100
- Args:
101
- num_preds: number of predicted boxes
102
- num_gts: number of true boxes
103
- p_ignore (float): probability of a predicted box assinged to an
104
- ignored truth
105
- p_assigned (float): probability of a predicted box not being
106
- assigned
107
- p_use_label (float | bool): with labels or not
108
- rng (None | int | numpy.random.RandomState): seed or state
109
-
110
- Returns:
111
- :obj:`AssignResult`: Randomly generated assign results.
112
-
113
- Example:
114
- >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA
115
- >>> self = AssignResult.random()
116
- >>> print(self.info)
117
- """
118
- from mmdet.core.bbox import demodata
119
- rng = demodata.ensure_rng(kwargs.get('rng', None))
120
-
121
- num_gts = kwargs.get('num_gts', None)
122
- num_preds = kwargs.get('num_preds', None)
123
- p_ignore = kwargs.get('p_ignore', 0.3)
124
- p_assigned = kwargs.get('p_assigned', 0.7)
125
- p_use_label = kwargs.get('p_use_label', 0.5)
126
- num_classes = kwargs.get('num_classes', 3)
127
-
128
- if num_gts is None:
129
- num_gts = rng.randint(0, 8)
130
- if num_preds is None:
131
- num_preds = rng.randint(0, 16)
132
-
133
- if num_gts == 0:
134
- max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
135
- gt_inds = torch.zeros(num_preds, dtype=torch.int64)
136
- if p_use_label is True or p_use_label < rng.rand():
137
- labels = torch.zeros(num_preds, dtype=torch.int64)
138
- else:
139
- labels = None
140
- else:
141
- import numpy as np
142
- # Create an overlap for each predicted box
143
- max_overlaps = torch.from_numpy(rng.rand(num_preds))
144
-
145
- # Construct gt_inds for each predicted box
146
- is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
147
- # maximum number of assignments constraints
148
- n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
149
-
150
- assigned_idxs = np.where(is_assigned)[0]
151
- rng.shuffle(assigned_idxs)
152
- assigned_idxs = assigned_idxs[0:n_assigned]
153
- assigned_idxs.sort()
154
-
155
- is_assigned[:] = 0
156
- is_assigned[assigned_idxs] = True
157
-
158
- is_ignore = torch.from_numpy(
159
- rng.rand(num_preds) < p_ignore) & is_assigned
160
-
161
- gt_inds = torch.zeros(num_preds, dtype=torch.int64)
162
-
163
- true_idxs = np.arange(num_gts)
164
- rng.shuffle(true_idxs)
165
- true_idxs = torch.from_numpy(true_idxs)
166
- gt_inds[is_assigned] = true_idxs[:n_assigned]
167
-
168
- gt_inds = torch.from_numpy(
169
- rng.randint(1, num_gts + 1, size=num_preds))
170
- gt_inds[is_ignore] = -1
171
- gt_inds[~is_assigned] = 0
172
- max_overlaps[~is_assigned] = 0
173
-
174
- if p_use_label is True or p_use_label < rng.rand():
175
- if num_classes == 0:
176
- labels = torch.zeros(num_preds, dtype=torch.int64)
177
- else:
178
- labels = torch.from_numpy(
179
- # note that we set FG labels to [0, num_classes - 1]
180
- # since mmdet v2.0
181
- # BG cat_id: num_class
182
- rng.randint(0, num_classes, size=num_preds))
183
- labels[~is_assigned] = 0
184
- else:
185
- labels = None
186
-
187
- self = cls(num_gts, gt_inds, max_overlaps, labels)
188
- return self
189
-
190
- def add_gt_(self, gt_labels):
191
- """Add ground truth as assigned results.
192
-
193
- Args:
194
- gt_labels (torch.Tensor): Labels of gt boxes
195
- """
196
- self_inds = torch.arange(
197
- 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
198
- self.gt_inds = torch.cat([self_inds, self.gt_inds])
199
-
200
- self.max_overlaps = torch.cat(
201
- [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
202
-
203
- if self.labels is not None:
204
- self.labels = torch.cat([gt_labels, self.labels])
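A small usage sketch of the class above (synthetic tensors, assuming `mmdet` is importable; the numbers are made up for illustration):

```python
import torch
from mmdet.core.bbox.assigners.assign_result import AssignResult

# Three predictions against two ground-truth boxes:
# pred 0 unassigned (0), pred 1 ignored (-1), pred 2 assigned to gt #2 (1-based).
result = AssignResult(
    num_gts=2,
    gt_inds=torch.LongTensor([0, -1, 2]),
    max_overlaps=torch.FloatTensor([0.1, 0.4, 0.9]),
    labels=torch.LongTensor([0, 0, 5]))
print(result.num_preds)                   # 3
result.add_gt_(torch.LongTensor([3, 7]))  # prepend gts as self-assigned results
print(result.num_preds)                   # 5
```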
 
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py DELETED
@@ -1,7 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/psanet_r50-d8.py',
3
- '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_40k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
 
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-nav/headroom.min.js DELETED
@@ -1,7 +0,0 @@
1
- /*!
2
- * headroom.js v0.12.0 - Give your page some headroom. Hide your header until you need it
3
- * Copyright (c) 2020 Nick Williams - http://wicky.nillia.ms/headroom.js
4
- * License: MIT
5
- */
6
-
7
- !function(t,n){"object"==typeof exports&&"undefined"!=typeof module?module.exports=n():"function"==typeof define&&define.amd?define(n):(t=t||self).Headroom=n()}(this,function(){"use strict";function t(){return"undefined"!=typeof window}function d(t){return function(t){return t&&t.document&&function(t){return 9===t.nodeType}(t.document)}(t)?function(t){var n=t.document,o=n.body,s=n.documentElement;return{scrollHeight:function(){return Math.max(o.scrollHeight,s.scrollHeight,o.offsetHeight,s.offsetHeight,o.clientHeight,s.clientHeight)},height:function(){return t.innerHeight||s.clientHeight||o.clientHeight},scrollY:function(){return void 0!==t.pageYOffset?t.pageYOffset:(s||o.parentNode||o).scrollTop}}}(t):function(t){return{scrollHeight:function(){return Math.max(t.scrollHeight,t.offsetHeight,t.clientHeight)},height:function(){return Math.max(t.offsetHeight,t.clientHeight)},scrollY:function(){return t.scrollTop}}}(t)}function n(t,s,e){var n,o=function(){var n=!1;try{var t={get passive(){n=!0}};window.addEventListener("test",t,t),window.removeEventListener("test",t,t)}catch(t){n=!1}return n}(),i=!1,r=d(t),l=r.scrollY(),a={};function c(){var t=Math.round(r.scrollY()),n=r.height(),o=r.scrollHeight();a.scrollY=t,a.lastScrollY=l,a.direction=l<t?"down":"up",a.distance=Math.abs(t-l),a.isOutOfBounds=t<0||o<t+n,a.top=t<=s.offset[a.direction],a.bottom=o<=t+n,a.toleranceExceeded=a.distance>s.tolerance[a.direction],e(a),l=t,i=!1}function h(){i||(i=!0,n=requestAnimationFrame(c))}var u=!!o&&{passive:!0,capture:!1};return t.addEventListener("scroll",h,u),c(),{destroy:function(){cancelAnimationFrame(n),t.removeEventListener("scroll",h,u)}}}function o(t){return t===Object(t)?t:{down:t,up:t}}function s(t,n){n=n||{},Object.assign(this,s.options,n),this.classes=Object.assign({},s.options.classes,n.classes),this.elem=t,this.tolerance=o(this.tolerance),this.offset=o(this.offset),this.initialised=!1,this.frozen=!1}return s.prototype={constructor:s,init:function(){return s.cutsTheMustard&&!this.initialised&&(this.addClass("initial"),this.initialised=!0,setTimeout(function(t){t.scrollTracker=n(t.scroller,{offset:t.offset,tolerance:t.tolerance},t.update.bind(t))},100,this)),this},destroy:function(){this.initialised=!1,Object.keys(this.classes).forEach(this.removeClass,this),this.scrollTracker.destroy()},unpin:function(){!this.hasClass("pinned")&&this.hasClass("unpinned")||(this.addClass("unpinned"),this.removeClass("pinned"),this.onUnpin&&this.onUnpin.call(this))},pin:function(){this.hasClass("unpinned")&&(this.addClass("pinned"),this.removeClass("unpinned"),this.onPin&&this.onPin.call(this))},freeze:function(){this.frozen=!0,this.addClass("frozen")},unfreeze:function(){this.frozen=!1,this.removeClass("frozen")},top:function(){this.hasClass("top")||(this.addClass("top"),this.removeClass("notTop"),this.onTop&&this.onTop.call(this))},notTop:function(){this.hasClass("notTop")||(this.addClass("notTop"),this.removeClass("top"),this.onNotTop&&this.onNotTop.call(this))},bottom:function(){this.hasClass("bottom")||(this.addClass("bottom"),this.removeClass("notBottom"),this.onBottom&&this.onBottom.call(this))},notBottom:function(){this.hasClass("notBottom")||(this.addClass("notBottom"),this.removeClass("bottom"),this.onNotBottom&&this.onNotBottom.call(this))},shouldUnpin:function(t){return"down"===t.direction&&!t.top&&t.toleranceExceeded},shouldPin:function(t){return"up"===t.direction&&t.toleranceExceeded||t.top},addClass:function(t){this.elem.classList.add.apply(this.elem.classList,this.classes[t].split(" "))},removeClass:function(t){this.elem.classList.remove.apply(this.elem.classList,this.classes[t].split(" "))},hasClass:function(t){return this.classes[t].split(" ").every(function(t){return this.classList.contains(t)},this.elem)},update:function(t){t.isOutOfBounds||!0!==this.frozen&&(t.top?this.top():this.notTop(),t.bottom?this.bottom():this.notBottom(),this.shouldUnpin(t)?this.unpin():this.shouldPin(t)&&this.pin())}},s.options={tolerance:{up:0,down:0},offset:0,scroller:t()?window:null,classes:{frozen:"headroom--frozen",pinned:"headroom--pinned",unpinned:"headroom--unpinned",top:"headroom--top",notTop:"headroom--not-top",bottom:"headroom--bottom",notBottom:"headroom--not-bottom",initial:"headroom"}},s.cutsTheMustard=!!(t()&&function(){}.bind&&"classList"in document.documentElement&&Object.assign&&Object.keys&&requestAnimationFrame),s});
 
spaces/AnthonyTruchetPoC/persistent-docker/scripts/common_header.sh DELETED
@@ -1,7 +0,0 @@
1
- CONTAINER_NAME=persistent-docker-space
2
- VOLUME_NAME=ai-playground-vol
3
-
4
- set -e # Exits on first error
5
-
6
- SCRIPT_PATH="$(realpath "${BASH_SOURCE:-$0}")"
7
- ROOT_DIRECTORY="$(dirname "$(dirname "${SCRIPT_PATH}")")"
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/pipelines/pipeline_tuneavideo.py DELETED
@@ -1,411 +0,0 @@
1
- # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
2
-
3
- import inspect
4
- from dataclasses import dataclass
5
- from typing import Callable, List, Optional, Union
6
-
7
- import numpy as np
8
- import torch
9
- from diffusers.configuration_utils import FrozenDict
10
- from diffusers.models import AutoencoderKL
11
- from diffusers.pipeline_utils import DiffusionPipeline
12
- from diffusers.schedulers import (
13
- DDIMScheduler,
14
- DPMSolverMultistepScheduler,
15
- EulerAncestralDiscreteScheduler,
16
- EulerDiscreteScheduler,
17
- LMSDiscreteScheduler,
18
- PNDMScheduler,
19
- )
20
- from diffusers.utils import BaseOutput, deprecate, is_accelerate_available, logging
21
- from einops import rearrange
22
- from packaging import version
23
- from transformers import CLIPTextModel, CLIPTokenizer
24
-
25
- from ..models.unet import UNet3DConditionModel
26
-
27
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
-
29
-
30
- @dataclass
31
- class TuneAVideoPipelineOutput(BaseOutput):
32
- videos: Union[torch.Tensor, np.ndarray]
33
-
34
-
35
- class TuneAVideoPipeline(DiffusionPipeline):
36
- _optional_components = []
37
-
38
- def __init__(
39
- self,
40
- vae: AutoencoderKL,
41
- text_encoder: CLIPTextModel,
42
- tokenizer: CLIPTokenizer,
43
- unet: UNet3DConditionModel,
44
- scheduler: Union[
45
- DDIMScheduler,
46
- PNDMScheduler,
47
- LMSDiscreteScheduler,
48
- EulerDiscreteScheduler,
49
- EulerAncestralDiscreteScheduler,
50
- DPMSolverMultistepScheduler,
51
- ],
52
- ):
53
- super().__init__()
54
-
55
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
56
- deprecation_message = (
57
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
58
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
59
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
60
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
61
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
62
- " file"
63
- )
64
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
65
- new_config = dict(scheduler.config)
66
- new_config["steps_offset"] = 1
67
- scheduler._internal_dict = FrozenDict(new_config)
68
-
69
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
70
- deprecation_message = (
71
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
72
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
73
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
74
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
75
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
76
- )
77
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
78
- new_config = dict(scheduler.config)
79
- new_config["clip_sample"] = False
80
- scheduler._internal_dict = FrozenDict(new_config)
81
-
82
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
83
- version.parse(unet.config._diffusers_version).base_version
84
- ) < version.parse("0.9.0.dev0")
85
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
86
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
87
- deprecation_message = (
88
- "The configuration file of the unet has set the default `sample_size` to smaller than"
89
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
90
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
91
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
92
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
93
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
94
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
95
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
96
- " the `unet/config.json` file"
97
- )
98
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
99
- new_config = dict(unet.config)
100
- new_config["sample_size"] = 64
101
- unet._internal_dict = FrozenDict(new_config)
102
-
103
- self.register_modules(
104
- vae=vae,
105
- text_encoder=text_encoder,
106
- tokenizer=tokenizer,
107
- unet=unet,
108
- scheduler=scheduler,
109
- )
110
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
111
-
112
- def enable_vae_slicing(self):
113
- self.vae.enable_slicing()
114
-
115
- def disable_vae_slicing(self):
116
- self.vae.disable_slicing()
117
-
118
- def enable_sequential_cpu_offload(self, gpu_id=0):
119
- if is_accelerate_available():
120
- from accelerate import cpu_offload
121
- else:
122
- raise ImportError("Please install accelerate via `pip install accelerate`")
123
-
124
- device = torch.device(f"cuda:{gpu_id}")
125
-
126
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
127
- if cpu_offloaded_model is not None:
128
- cpu_offload(cpu_offloaded_model, device)
129
-
130
- @property
131
- def _execution_device(self):
132
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
133
- return self.device
134
- for module in self.unet.modules():
135
- if (
136
- hasattr(module, "_hf_hook")
137
- and hasattr(module._hf_hook, "execution_device")
138
- and module._hf_hook.execution_device is not None
139
- ):
140
- return torch.device(module._hf_hook.execution_device)
141
- return self.device
142
-
143
- def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):
144
- batch_size = len(prompt) if isinstance(prompt, list) else 1
145
-
146
- text_inputs = self.tokenizer(
147
- prompt,
148
- padding="max_length",
149
- max_length=self.tokenizer.model_max_length,
150
- truncation=True,
151
- return_tensors="pt",
152
- )
153
- text_input_ids = text_inputs.input_ids
154
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
155
-
156
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
157
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
158
- logger.warning(
159
- "The following part of your input was truncated because CLIP can only handle sequences up to"
160
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
161
- )
162
-
163
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
164
- attention_mask = text_inputs.attention_mask.to(device)
165
- else:
166
- attention_mask = None
167
-
168
- text_embeddings = self.text_encoder(
169
- text_input_ids.to(device),
170
- attention_mask=attention_mask,
171
- )
172
- text_embeddings = text_embeddings[0]
173
-
174
- # duplicate text embeddings for each generation per prompt, using mps friendly method
175
- bs_embed, seq_len, _ = text_embeddings.shape
176
- text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)
177
- text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)
178
-
179
- # get unconditional embeddings for classifier free guidance
180
- if do_classifier_free_guidance:
181
- uncond_tokens: List[str]
182
- if negative_prompt is None:
183
- uncond_tokens = [""] * batch_size
184
- elif type(prompt) is not type(negative_prompt):
185
- raise TypeError(
186
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
187
- f" {type(prompt)}."
188
- )
189
- elif isinstance(negative_prompt, str):
190
- uncond_tokens = [negative_prompt]
191
- elif batch_size != len(negative_prompt):
192
- raise ValueError(
193
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
194
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
195
- " the batch size of `prompt`."
196
- )
197
- else:
198
- uncond_tokens = negative_prompt
199
-
200
- max_length = text_input_ids.shape[-1]
201
- uncond_input = self.tokenizer(
202
- uncond_tokens,
203
- padding="max_length",
204
- max_length=max_length,
205
- truncation=True,
206
- return_tensors="pt",
207
- )
208
-
209
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
210
- attention_mask = uncond_input.attention_mask.to(device)
211
- else:
212
- attention_mask = None
213
-
214
- uncond_embeddings = self.text_encoder(
215
- uncond_input.input_ids.to(device),
216
- attention_mask=attention_mask,
217
- )
218
- uncond_embeddings = uncond_embeddings[0]
219
-
220
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
221
- seq_len = uncond_embeddings.shape[1]
222
- uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)
223
- uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)
224
-
225
- # For classifier free guidance, we need to do two forward passes.
226
- # Here we concatenate the unconditional and text embeddings into a single batch
227
- # to avoid doing two forward passes
228
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
229
-
230
- return text_embeddings
231
-
232
- def decode_latents(self, latents):
233
- video_length = latents.shape[2]
234
- latents = 1 / 0.18215 * latents
235
- latents = rearrange(latents, "b c f h w -> (b f) c h w")
236
- video = self.vae.decode(latents).sample
237
- video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
238
- video = (video / 2 + 0.5).clamp(0, 1)
239
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
240
- video = video.cpu().float().numpy()
241
- return video
242
-
243
- def prepare_extra_step_kwargs(self, generator, eta):
244
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
245
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
246
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
247
- # and should be between [0, 1]
248
-
249
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
250
- extra_step_kwargs = {}
251
- if accepts_eta:
252
- extra_step_kwargs["eta"] = eta
253
-
254
- # check if the scheduler accepts generator
255
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
256
- if accepts_generator:
257
- extra_step_kwargs["generator"] = generator
258
- return extra_step_kwargs
259
-
260
- def check_inputs(self, prompt, height, width, callback_steps):
261
- if not isinstance(prompt, str) and not isinstance(prompt, list):
262
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
263
-
264
- if height % 8 != 0 or width % 8 != 0:
265
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
266
-
267
- if (callback_steps is None) or (
268
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
269
- ):
270
- raise ValueError(
271
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
272
- f" {type(callback_steps)}."
273
- )
274
-
275
- def prepare_latents(
276
- self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None
277
- ):
278
- shape = (
279
- batch_size,
280
- num_channels_latents,
281
- video_length,
282
- height // self.vae_scale_factor,
283
- width // self.vae_scale_factor,
284
- )
285
- if isinstance(generator, list) and len(generator) != batch_size:
286
- raise ValueError(
287
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
288
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
289
- )
290
-
291
- if latents is None:
292
- rand_device = "cpu" if device.type == "mps" else device
293
-
294
- if isinstance(generator, list):
295
- shape = (1,) + shape[1:]
296
- latents = [
297
- torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
298
- for i in range(batch_size)
299
- ]
300
- latents = torch.cat(latents, dim=0).to(device)
301
- else:
302
- latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
303
- else:
304
- if latents.shape != shape:
305
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
306
- latents = latents.to(device)
307
-
308
- # scale the initial noise by the standard deviation required by the scheduler
309
- latents = latents * self.scheduler.init_noise_sigma
310
- return latents
311
-
312
- @torch.no_grad()
313
- def __call__(
314
- self,
315
- prompt: Union[str, List[str]],
316
- video_length: Optional[int],
317
- height: Optional[int] = None,
318
- width: Optional[int] = None,
319
- num_inference_steps: int = 50,
320
- guidance_scale: float = 7.5,
321
- negative_prompt: Optional[Union[str, List[str]]] = None,
322
- num_videos_per_prompt: Optional[int] = 1,
323
- eta: float = 0.0,
324
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
325
- latents: Optional[torch.FloatTensor] = None,
326
- output_type: Optional[str] = "tensor",
327
- return_dict: bool = True,
328
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
329
- callback_steps: Optional[int] = 1,
330
- **kwargs,
331
- ):
332
- # Default height and width to unet
333
- height = height or self.unet.config.sample_size * self.vae_scale_factor
334
- width = width or self.unet.config.sample_size * self.vae_scale_factor
335
-
336
- # Check inputs. Raise error if not correct
337
- self.check_inputs(prompt, height, width, callback_steps)
338
-
339
- # Define call parameters
340
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
341
- device = self._execution_device
342
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
343
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
344
- # corresponds to doing no classifier free guidance.
345
- do_classifier_free_guidance = guidance_scale > 1.0
346
-
347
- # Encode input prompt
348
- text_embeddings = self._encode_prompt(
349
- prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt
350
- )
351
-
352
- # Prepare timesteps
353
- self.scheduler.set_timesteps(num_inference_steps, device=device)
354
- timesteps = self.scheduler.timesteps
355
-
356
- # Prepare latent variables
357
- num_channels_latents = self.unet.in_channels
358
- latents = self.prepare_latents(
359
- batch_size * num_videos_per_prompt,
360
- num_channels_latents,
361
- video_length,
362
- height,
363
- width,
364
- text_embeddings.dtype,
365
- device,
366
- generator,
367
- latents,
368
- )
369
- latents_dtype = latents.dtype
370
-
371
- # Prepare extra step kwargs.
372
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
373
-
374
- # Denoising loop
375
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
376
- with self.progress_bar(total=num_inference_steps) as progress_bar:
377
- for i, t in enumerate(timesteps):
378
- # expand the latents if we are doing classifier free guidance
379
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
380
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
381
-
382
- # predict the noise residual
383
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(
384
- dtype=latents_dtype
385
- )
386
-
387
- # perform guidance
388
- if do_classifier_free_guidance:
389
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
390
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
391
-
392
- # compute the previous noisy sample x_t -> x_t-1
393
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
394
-
395
- # call the callback, if provided
396
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
397
- progress_bar.update()
398
- if callback is not None and i % callback_steps == 0:
399
- callback(i, t, latents)
400
-
401
- # Post-processing
402
- video = self.decode_latents(latents)
403
-
404
- # Convert to tensor
405
- if output_type == "tensor":
406
- video = torch.from_numpy(video)
407
-
408
- if not return_dict:
409
- return video
410
-
411
- return TuneAVideoPipelineOutput(videos=video)
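A rough usage sketch of the pipeline defined above. The checkpoint path, prompt, and sizes below are placeholders rather than values from the source, and it assumes a Tune-A-Video style checkpoint whose `unet` is the repo's `UNet3DConditionModel`:

```python
import torch

# Hypothetical invocation -- paths and prompt are placeholders.
pipe = TuneAVideoPipeline.from_pretrained(
    "path/to/tune-a-video-checkpoint", torch_dtype=torch.float16).to("cuda")
pipe.enable_vae_slicing()  # trade a little speed for lower VRAM use

out = pipe(
    prompt="a panda surfing a wave",
    video_length=8,            # number of frames to generate
    height=512, width=512,
    num_inference_steps=50,
    guidance_scale=7.5)        # > 1.0 enables classifier-free guidance
video = out.videos             # (batch, channels, frames, height, width), values in [0, 1]
```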
 
spaces/Artrajz/vits-simple-api/bert_vits2/text/chinese_bert.py DELETED
@@ -1,70 +0,0 @@
1
- import os
2
-
3
- import config
4
- import torch
5
- from transformers import AutoTokenizer, AutoModelForMaskedLM
6
- from logger import logger
7
- from utils.download import download_and_verify
8
- from config import DEVICE as device
9
-
10
- URLS = [
11
- "https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/resolve/main/pytorch_model.bin",
12
- ]
13
- TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin")
14
- EXPECTED_MD5 = None
15
-
16
- if not os.path.exists(TARGET_PATH):
17
- success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
18
-
19
- try:
20
- logger.info("Loading chinese-roberta-wwm-ext-large...")
21
- tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large")
22
- model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large").to(
23
- device)
24
- logger.info("Loading finished.")
25
- except Exception as e:
26
- logger.error(e)
27
- logger.error(f"Please download pytorch_model.bin from hfl/chinese-roberta-wwm-ext-large.")
28
-
29
-
30
- def get_bert_feature(text, word2ph, device=config.DEVICE):
31
- with torch.no_grad():
32
- inputs = tokenizer(text, return_tensors='pt')
33
- for i in inputs:
34
- inputs[i] = inputs[i].to(device)
35
- res = model(**inputs, output_hidden_states=True)
36
- res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu()
37
-
38
- assert len(word2ph) == len(text) + 2
39
- word2phone = word2ph
40
- phone_level_feature = []
41
- for i in range(len(word2phone)):
42
- repeat_feature = res[i].repeat(word2phone[i], 1)
43
- phone_level_feature.append(repeat_feature)
44
-
45
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
46
-
47
- return phone_level_feature.T
48
-
49
-
50
- if __name__ == '__main__':
51
- import torch
52
-
53
- word_level_feature = torch.rand(38, 1024)  # 38 tokens, each with a 1024-dim feature
54
- word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2,
55
- 2, 2, 2, 1]
56
-
57
- # 计算总帧数
58
- total_frames = sum(word2phone)
59
- print(word_level_feature.shape)
60
- print(word2phone)
61
- phone_level_feature = []
62
- for i in range(len(word2phone)):
63
- print(word_level_feature[i].shape)
64
-
65
- # 对每个词重复word2phone[i]次
66
- repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
67
- phone_level_feature.append(repeat_feature)
68
-
69
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
70
- print(phone_level_feature.shape)  # torch.Size([sum(word2phone), 1024])
 
spaces/AsakuraMizu/moe-tts/utils.py DELETED
@@ -1,226 +0,0 @@
1
- import os
2
- import glob
3
- import sys
4
- import argparse
5
- import logging
6
- import json
7
- import subprocess
8
- import numpy as np
9
- from scipy.io.wavfile import read
10
- import torch
11
-
12
- MATPLOTLIB_FLAG = False
13
-
14
- logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
15
- logger = logging
16
-
17
-
18
- def load_checkpoint(checkpoint_path, model, optimizer=None):
19
- assert os.path.isfile(checkpoint_path)
20
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
21
- iteration = checkpoint_dict['iteration']
22
- learning_rate = checkpoint_dict['learning_rate']
23
- if optimizer is not None:
24
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
25
- saved_state_dict = checkpoint_dict['model']
26
- if hasattr(model, 'module'):
27
- state_dict = model.module.state_dict()
28
- else:
29
- state_dict = model.state_dict()
30
- new_state_dict = {}
31
- for k, v in state_dict.items():
32
- try:
33
- new_state_dict[k] = saved_state_dict[k]
34
- except KeyError:
35
- logger.info("%s is not in the checkpoint" % k)
36
- new_state_dict[k] = v
37
- if hasattr(model, 'module'):
38
- model.module.load_state_dict(new_state_dict)
39
- else:
40
- model.load_state_dict(new_state_dict)
41
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
42
- checkpoint_path, iteration))
43
- return model, optimizer, learning_rate, iteration
44
-
45
-
46
- def plot_spectrogram_to_numpy(spectrogram):
47
- global MATPLOTLIB_FLAG
48
- if not MATPLOTLIB_FLAG:
49
- import matplotlib
50
- matplotlib.use("Agg")
51
- MATPLOTLIB_FLAG = True
52
- mpl_logger = logging.getLogger('matplotlib')
53
- mpl_logger.setLevel(logging.WARNING)
54
- import matplotlib.pylab as plt
55
- import numpy as np
56
-
57
- fig, ax = plt.subplots(figsize=(10, 2))
58
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
59
- interpolation='none')
60
- plt.colorbar(im, ax=ax)
61
- plt.xlabel("Frames")
62
- plt.ylabel("Channels")
63
- plt.tight_layout()
64
-
65
- fig.canvas.draw()
66
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
67
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
68
- plt.close()
69
- return data
70
-
71
-
72
- def plot_alignment_to_numpy(alignment, info=None):
73
- global MATPLOTLIB_FLAG
74
- if not MATPLOTLIB_FLAG:
75
- import matplotlib
76
- matplotlib.use("Agg")
77
- MATPLOTLIB_FLAG = True
78
- mpl_logger = logging.getLogger('matplotlib')
79
- mpl_logger.setLevel(logging.WARNING)
80
- import matplotlib.pylab as plt
81
- import numpy as np
82
-
83
- fig, ax = plt.subplots(figsize=(6, 4))
84
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
85
- interpolation='none')
86
- fig.colorbar(im, ax=ax)
87
- xlabel = 'Decoder timestep'
88
- if info is not None:
89
- xlabel += '\n\n' + info
90
- plt.xlabel(xlabel)
91
- plt.ylabel('Encoder timestep')
92
- plt.tight_layout()
93
-
94
- fig.canvas.draw()
95
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
96
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
97
- plt.close()
98
- return data
99
-
100
-
101
- def load_wav_to_torch(full_path):
102
- sampling_rate, data = read(full_path)
103
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
104
-
105
-
106
- def load_filepaths_and_text(filename, split="|"):
107
- with open(filename, encoding='utf-8') as f:
108
- filepaths_and_text = [line.strip().split(split) for line in f]
109
- return filepaths_and_text
110
-
111
-
112
- def get_hparams(init=True):
113
- parser = argparse.ArgumentParser()
114
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
115
- help='JSON file for configuration')
116
- parser.add_argument('-m', '--model', type=str, required=True,
117
- help='Model name')
118
-
119
- args = parser.parse_args()
120
- model_dir = os.path.join("./logs", args.model)
121
-
122
- if not os.path.exists(model_dir):
123
- os.makedirs(model_dir)
124
-
125
- config_path = args.config
126
- config_save_path = os.path.join(model_dir, "config.json")
127
- if init:
128
- with open(config_path, "r") as f:
129
- data = f.read()
130
- with open(config_save_path, "w") as f:
131
- f.write(data)
132
- else:
133
- with open(config_save_path, "r") as f:
134
- data = f.read()
135
- config = json.loads(data)
136
-
137
- hparams = HParams(**config)
138
- hparams.model_dir = model_dir
139
- return hparams
140
-
141
-
142
- def get_hparams_from_dir(model_dir):
143
- config_save_path = os.path.join(model_dir, "config.json")
144
- with open(config_save_path, "r") as f:
145
- data = f.read()
146
- config = json.loads(data)
147
-
148
- hparams = HParams(**config)
149
- hparams.model_dir = model_dir
150
- return hparams
151
-
152
-
153
- def get_hparams_from_file(config_path):
154
- with open(config_path, "r", encoding="utf-8") as f:
155
- data = f.read()
156
- config = json.loads(data)
157
-
158
- hparams = HParams(**config)
159
- return hparams
160
-
161
-
162
- def check_git_hash(model_dir):
163
- source_dir = os.path.dirname(os.path.realpath(__file__))
164
- if not os.path.exists(os.path.join(source_dir, ".git")):
165
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
166
- source_dir
167
- ))
168
- return
169
-
170
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
171
-
172
- path = os.path.join(model_dir, "githash")
173
- if os.path.exists(path):
174
- saved_hash = open(path).read()
175
- if saved_hash != cur_hash:
176
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
177
- saved_hash[:8], cur_hash[:8]))
178
- else:
179
- open(path, "w").write(cur_hash)
180
-
181
-
182
- def get_logger(model_dir, filename="train.log"):
183
- global logger
184
- logger = logging.getLogger(os.path.basename(model_dir))
185
- logger.setLevel(logging.DEBUG)
186
-
187
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
188
- if not os.path.exists(model_dir):
189
- os.makedirs(model_dir)
190
- h = logging.FileHandler(os.path.join(model_dir, filename))
191
- h.setLevel(logging.DEBUG)
192
- h.setFormatter(formatter)
193
- logger.addHandler(h)
194
- return logger
195
-
196
-
197
- class HParams():
198
- def __init__(self, **kwargs):
199
- for k, v in kwargs.items():
200
- if type(v) == dict:
201
- v = HParams(**v)
202
- self[k] = v
203
-
204
- def keys(self):
205
- return self.__dict__.keys()
206
-
207
- def items(self):
208
- return self.__dict__.items()
209
-
210
- def values(self):
211
- return self.__dict__.values()
212
-
213
- def __len__(self):
214
- return len(self.__dict__)
215
-
216
- def __getitem__(self, key):
217
- return getattr(self, key)
218
-
219
- def __setitem__(self, key, value):
220
- return setattr(self, key, value)
221
-
222
- def __contains__(self, key):
223
- return key in self.__dict__
224
-
225
- def __repr__(self):
226
- return self.__dict__.__repr__()
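A tiny illustrative sketch of how `HParams` behaves (values made up): nested dicts become nested `HParams`, and attribute and item access are interchangeable.

```python
# Illustrative only -- assumes the HParams class defined above.
hps = HParams(train={'learning_rate': 2e-4, 'batch_size': 16},
              model={'hidden_channels': 192})
print(hps.train.learning_rate)          # 0.0002
print(hps['model']['hidden_channels'])  # 192
print('train' in hps)                   # True
hps.train.batch_size = 32               # attributes can be reassigned like a namespace
```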
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/resolvers.py DELETED
@@ -1,547 +0,0 @@
1
- import collections
2
- import itertools
3
- import operator
4
-
5
- from .providers import AbstractResolver
6
- from .structs import DirectedGraph, IteratorMapping, build_iter_view
7
-
8
- RequirementInformation = collections.namedtuple(
9
- "RequirementInformation", ["requirement", "parent"]
10
- )
11
-
12
-
13
- class ResolverException(Exception):
14
- """A base class for all exceptions raised by this module.
15
-
16
- Exceptions derived from this class should all be handled in this module. Any
17
- bubbling past the resolver should be treated as a bug.
18
- """
19
-
20
-
21
- class RequirementsConflicted(ResolverException):
22
- def __init__(self, criterion):
23
- super(RequirementsConflicted, self).__init__(criterion)
24
- self.criterion = criterion
25
-
26
- def __str__(self):
27
- return "Requirements conflict: {}".format(
28
- ", ".join(repr(r) for r in self.criterion.iter_requirement()),
29
- )
30
-
31
-
32
- class InconsistentCandidate(ResolverException):
33
- def __init__(self, candidate, criterion):
34
- super(InconsistentCandidate, self).__init__(candidate, criterion)
35
- self.candidate = candidate
36
- self.criterion = criterion
37
-
38
- def __str__(self):
39
- return "Provided candidate {!r} does not satisfy {}".format(
40
- self.candidate,
41
- ", ".join(repr(r) for r in self.criterion.iter_requirement()),
42
- )
43
-
44
-
45
- class Criterion(object):
46
- """Representation of possible resolution results of a package.
47
-
48
- This holds three attributes:
49
-
50
- * `information` is a collection of `RequirementInformation` pairs.
51
- Each pair is a requirement contributing to this criterion, and the
52
- candidate that provides the requirement.
53
- * `incompatibilities` is a collection of all known not-to-work candidates
54
- to exclude from consideration.
55
- * `candidates` is a collection containing all possible candidates deduced
56
- from the union of contributing requirements and known incompatibilities.
57
- It should never be empty, except when the criterion is an attribute of a
58
- raised `RequirementsConflicted` (in which case it is always empty).
59
-
60
- .. note::
61
- This class is intended to be externally immutable. **Do not** mutate
62
- any of its attribute containers.
63
- """
64
-
65
- def __init__(self, candidates, information, incompatibilities):
66
- self.candidates = candidates
67
- self.information = information
68
- self.incompatibilities = incompatibilities
69
-
70
- def __repr__(self):
71
- requirements = ", ".join(
72
- "({!r}, via={!r})".format(req, parent)
73
- for req, parent in self.information
74
- )
75
- return "Criterion({})".format(requirements)
76
-
77
- def iter_requirement(self):
78
- return (i.requirement for i in self.information)
79
-
80
- def iter_parent(self):
81
- return (i.parent for i in self.information)
82
-
83
-
84
- class ResolutionError(ResolverException):
85
- pass
86
-
87
-
88
- class ResolutionImpossible(ResolutionError):
89
- def __init__(self, causes):
90
- super(ResolutionImpossible, self).__init__(causes)
91
- # causes is a list of RequirementInformation objects
92
- self.causes = causes
93
-
94
-
95
- class ResolutionTooDeep(ResolutionError):
96
- def __init__(self, round_count):
97
- super(ResolutionTooDeep, self).__init__(round_count)
98
- self.round_count = round_count
99
-
100
-
101
- # Resolution state in a round.
102
- State = collections.namedtuple("State", "mapping criteria backtrack_causes")
103
-
104
-
105
- class Resolution(object):
106
- """Stateful resolution object.
107
-
108
- This is designed as a one-off object that holds information to kick start
109
- the resolution process, and holds the results afterwards.
110
- """
111
-
112
- def __init__(self, provider, reporter):
113
- self._p = provider
114
- self._r = reporter
115
- self._states = []
116
-
117
- @property
118
- def state(self):
119
- try:
120
- return self._states[-1]
121
- except IndexError:
122
- raise AttributeError("state")
123
-
124
- def _push_new_state(self):
125
- """Push a new state into history.
126
-
127
- This new state will be used to hold resolution results of the next
128
- coming round.
129
- """
130
- base = self._states[-1]
131
- state = State(
132
- mapping=base.mapping.copy(),
133
- criteria=base.criteria.copy(),
134
- backtrack_causes=base.backtrack_causes[:],
135
- )
136
- self._states.append(state)
137
-
138
- def _add_to_criteria(self, criteria, requirement, parent):
139
- self._r.adding_requirement(requirement=requirement, parent=parent)
140
-
141
- identifier = self._p.identify(requirement_or_candidate=requirement)
142
- criterion = criteria.get(identifier)
143
- if criterion:
144
- incompatibilities = list(criterion.incompatibilities)
145
- else:
146
- incompatibilities = []
147
-
148
- matches = self._p.find_matches(
149
- identifier=identifier,
150
- requirements=IteratorMapping(
151
- criteria,
152
- operator.methodcaller("iter_requirement"),
153
- {identifier: [requirement]},
154
- ),
155
- incompatibilities=IteratorMapping(
156
- criteria,
157
- operator.attrgetter("incompatibilities"),
158
- {identifier: incompatibilities},
159
- ),
160
- )
161
-
162
- if criterion:
163
- information = list(criterion.information)
164
- information.append(RequirementInformation(requirement, parent))
165
- else:
166
- information = [RequirementInformation(requirement, parent)]
167
-
168
- criterion = Criterion(
169
- candidates=build_iter_view(matches),
170
- information=information,
171
- incompatibilities=incompatibilities,
172
- )
173
- if not criterion.candidates:
174
- raise RequirementsConflicted(criterion)
175
- criteria[identifier] = criterion
176
-
177
- def _remove_information_from_criteria(self, criteria, parents):
178
- """Remove information from parents of criteria.
179
-
180
- Concretely, removes all values from each criterion's ``information``
181
- field that have one of ``parents`` as provider of the requirement.
182
-
183
- :param criteria: The criteria to update.
184
- :param parents: Identifiers for which to remove information from all criteria.
185
- """
186
- if not parents:
187
- return
188
- for key, criterion in criteria.items():
189
- criteria[key] = Criterion(
190
- criterion.candidates,
191
- [
192
- information
193
- for information in criterion.information
194
- if (
195
- information.parent is None
196
- or self._p.identify(information.parent) not in parents
197
- )
198
- ],
199
- criterion.incompatibilities,
200
- )
201
-
202
- def _get_preference(self, name):
203
- return self._p.get_preference(
204
- identifier=name,
205
- resolutions=self.state.mapping,
206
- candidates=IteratorMapping(
207
- self.state.criteria,
208
- operator.attrgetter("candidates"),
209
- ),
210
- information=IteratorMapping(
211
- self.state.criteria,
212
- operator.attrgetter("information"),
213
- ),
214
- backtrack_causes=self.state.backtrack_causes,
215
- )
216
-
217
- def _is_current_pin_satisfying(self, name, criterion):
218
- try:
219
- current_pin = self.state.mapping[name]
220
- except KeyError:
221
- return False
222
- return all(
223
- self._p.is_satisfied_by(requirement=r, candidate=current_pin)
224
- for r in criterion.iter_requirement()
225
- )
226
-
227
- def _get_updated_criteria(self, candidate):
228
- criteria = self.state.criteria.copy()
229
- for requirement in self._p.get_dependencies(candidate=candidate):
230
- self._add_to_criteria(criteria, requirement, parent=candidate)
231
- return criteria
232
-
233
- def _attempt_to_pin_criterion(self, name):
234
- criterion = self.state.criteria[name]
235
-
236
- causes = []
237
- for candidate in criterion.candidates:
238
- try:
239
- criteria = self._get_updated_criteria(candidate)
240
- except RequirementsConflicted as e:
241
- self._r.rejecting_candidate(e.criterion, candidate)
242
- causes.append(e.criterion)
243
- continue
244
-
245
- # Check the newly-pinned candidate actually works. This should
246
- # always pass under normal circumstances, but in the case of a
247
- # faulty provider, we will raise an error to notify the implementer
248
- # to fix find_matches() and/or is_satisfied_by().
249
- satisfied = all(
250
- self._p.is_satisfied_by(requirement=r, candidate=candidate)
251
- for r in criterion.iter_requirement()
252
- )
253
- if not satisfied:
254
- raise InconsistentCandidate(candidate, criterion)
255
-
256
- self._r.pinning(candidate=candidate)
257
- self.state.criteria.update(criteria)
258
-
259
- # Put newly-pinned candidate at the end. This is essential because
260
- # backtracking looks at this mapping to get the last pin.
261
- self.state.mapping.pop(name, None)
262
- self.state.mapping[name] = candidate
263
-
264
- return []
265
-
266
- # All candidates tried, nothing works. This criterion is a dead
267
- # end, signal for backtracking.
268
- return causes
269
-
270
- def _backjump(self, causes):
271
- """Perform backjumping.
272
-
273
- When we enter here, the stack is like this::
274
-
275
- [ state Z ]
276
- [ state Y ]
277
- [ state X ]
278
- .... earlier states are irrelevant.
279
-
280
- 1. No pins worked for Z, so it does not have a pin.
281
- 2. We want to reset state Y to unpinned, and pin another candidate.
282
- 3. State X holds what state Y was before the pin, but does not
283
- have the incompatibility information gathered in state Y.
284
-
285
- Each iteration of the loop will:
286
-
287
- 1. Identify Z. The incompatibility is not always caused by the latest
288
- state. For example, given three requirements A, B and C, with
289
- dependencies A1, B1 and C1, where A1 and B1 are incompatible: the
290
- last state might be related to C, so we want to discard the
291
- previous state.
292
- 2. Discard Z.
293
- 3. Discard Y but remember its incompatibility information gathered
294
- previously, and the failure we're dealing with right now.
295
- 4. Push a new state Y' based on X, and apply the incompatibility
296
- information from Y to Y'.
297
- 5a. If this causes Y' to conflict, we need to backtrack again. Make Y'
298
- the new Z and go back to step 2.
299
- 5b. If the incompatibilities apply cleanly, end backtracking.
300
- """
301
- incompatible_reqs = itertools.chain(
302
- (c.parent for c in causes if c.parent is not None),
303
- (c.requirement for c in causes),
304
- )
305
- incompatible_deps = {self._p.identify(r) for r in incompatible_reqs}
306
- while len(self._states) >= 3:
307
- # Remove the state that triggered backtracking.
308
- del self._states[-1]
309
-
310
- # Ensure to backtrack to a state that caused the incompatibility
311
- incompatible_state = False
312
- while not incompatible_state:
313
- # Retrieve the last candidate pin and known incompatibilities.
314
- try:
315
- broken_state = self._states.pop()
316
- name, candidate = broken_state.mapping.popitem()
317
- except (IndexError, KeyError):
318
- raise ResolutionImpossible(causes)
319
- current_dependencies = {
320
- self._p.identify(d)
321
- for d in self._p.get_dependencies(candidate)
322
- }
323
- incompatible_state = not current_dependencies.isdisjoint(
324
- incompatible_deps
325
- )
326
-
327
- incompatibilities_from_broken = [
328
- (k, list(v.incompatibilities))
329
- for k, v in broken_state.criteria.items()
330
- ]
331
-
332
- # Also mark the newly known incompatibility.
333
- incompatibilities_from_broken.append((name, [candidate]))
334
-
335
- # Create a new state from the last known-to-work one, and apply
336
- # the previously gathered incompatibility information.
337
- def _patch_criteria():
338
- for k, incompatibilities in incompatibilities_from_broken:
339
- if not incompatibilities:
340
- continue
341
- try:
342
- criterion = self.state.criteria[k]
343
- except KeyError:
344
- continue
345
- matches = self._p.find_matches(
346
- identifier=k,
347
- requirements=IteratorMapping(
348
- self.state.criteria,
349
- operator.methodcaller("iter_requirement"),
350
- ),
351
- incompatibilities=IteratorMapping(
352
- self.state.criteria,
353
- operator.attrgetter("incompatibilities"),
354
- {k: incompatibilities},
355
- ),
356
- )
357
- candidates = build_iter_view(matches)
358
- if not candidates:
359
- return False
360
- incompatibilities.extend(criterion.incompatibilities)
361
- self.state.criteria[k] = Criterion(
362
- candidates=candidates,
363
- information=list(criterion.information),
364
- incompatibilities=incompatibilities,
365
- )
366
- return True
367
-
368
- self._push_new_state()
369
- success = _patch_criteria()
370
-
371
- # It works! Let's work on this new state.
372
- if success:
373
- return True
374
-
375
- # State does not work after applying known incompatibilities.
376
- # Try the still previous state.
377
-
378
- # No way to backtrack anymore.
379
- return False
380
-
381
- def resolve(self, requirements, max_rounds):
382
- if self._states:
383
- raise RuntimeError("already resolved")
384
-
385
- self._r.starting()
386
-
387
- # Initialize the root state.
388
- self._states = [
389
- State(
390
- mapping=collections.OrderedDict(),
391
- criteria={},
392
- backtrack_causes=[],
393
- )
394
- ]
395
- for r in requirements:
396
- try:
397
- self._add_to_criteria(self.state.criteria, r, parent=None)
398
- except RequirementsConflicted as e:
399
- raise ResolutionImpossible(e.criterion.information)
400
-
401
- # The root state is saved as a sentinel so the first ever pin can have
402
- # something to backtrack to if it fails. The root state is basically
403
- # pinning the virtual "root" package in the graph.
404
- self._push_new_state()
405
-
406
- for round_index in range(max_rounds):
407
- self._r.starting_round(index=round_index)
408
-
409
- unsatisfied_names = [
410
- key
411
- for key, criterion in self.state.criteria.items()
412
- if not self._is_current_pin_satisfying(key, criterion)
413
- ]
414
-
415
- # All criteria are accounted for. Nothing more to pin, we are done!
416
- if not unsatisfied_names:
417
- self._r.ending(state=self.state)
418
- return self.state
419
-
420
- # keep track of satisfied names to calculate diff after pinning
421
- satisfied_names = set(self.state.criteria.keys()) - set(
422
- unsatisfied_names
423
- )
424
-
425
- # Choose the most preferred unpinned criterion to try.
426
- name = min(unsatisfied_names, key=self._get_preference)
427
- failure_causes = self._attempt_to_pin_criterion(name)
428
-
429
- if failure_causes:
430
- causes = [i for c in failure_causes for i in c.information]
431
- # Backjump if pinning fails. The backjump process puts us in
432
- # an unpinned state, so we can work on it in the next round.
433
- self._r.resolving_conflicts(causes=causes)
434
- success = self._backjump(causes)
435
- self.state.backtrack_causes[:] = causes
436
-
437
- # Dead ends everywhere. Give up.
438
- if not success:
439
- raise ResolutionImpossible(self.state.backtrack_causes)
440
- else:
441
- # discard as information sources any invalidated names
442
- # (unsatisfied names that were previously satisfied)
443
- newly_unsatisfied_names = {
444
- key
445
- for key, criterion in self.state.criteria.items()
446
- if key in satisfied_names
447
- and not self._is_current_pin_satisfying(key, criterion)
448
- }
449
- self._remove_information_from_criteria(
450
- self.state.criteria, newly_unsatisfied_names
451
- )
452
- # Pinning was successful. Push a new state to do another pin.
453
- self._push_new_state()
454
-
455
- self._r.ending_round(index=round_index, state=self.state)
456
-
457
- raise ResolutionTooDeep(max_rounds)
458
-
459
-
460
- def _has_route_to_root(criteria, key, all_keys, connected):
461
- if key in connected:
462
- return True
463
- if key not in criteria:
464
- return False
465
- for p in criteria[key].iter_parent():
466
- try:
467
- pkey = all_keys[id(p)]
468
- except KeyError:
469
- continue
470
- if pkey in connected:
471
- connected.add(key)
472
- return True
473
- if _has_route_to_root(criteria, pkey, all_keys, connected):
474
- connected.add(key)
475
- return True
476
- return False
477
-
478
-
479
- Result = collections.namedtuple("Result", "mapping graph criteria")
480
-
481
-
482
- def _build_result(state):
483
- mapping = state.mapping
484
- all_keys = {id(v): k for k, v in mapping.items()}
485
- all_keys[id(None)] = None
486
-
487
- graph = DirectedGraph()
488
- graph.add(None) # Sentinel as root dependencies' parent.
489
-
490
- connected = {None}
491
- for key, criterion in state.criteria.items():
492
- if not _has_route_to_root(state.criteria, key, all_keys, connected):
493
- continue
494
- if key not in graph:
495
- graph.add(key)
496
- for p in criterion.iter_parent():
497
- try:
498
- pkey = all_keys[id(p)]
499
- except KeyError:
500
- continue
501
- if pkey not in graph:
502
- graph.add(pkey)
503
- graph.connect(pkey, key)
504
-
505
- return Result(
506
- mapping={k: v for k, v in mapping.items() if k in connected},
507
- graph=graph,
508
- criteria=state.criteria,
509
- )
510
-
511
-
512
- class Resolver(AbstractResolver):
513
- """The thing that performs the actual resolution work."""
514
-
515
- base_exception = ResolverException
516
-
517
- def resolve(self, requirements, max_rounds=100):
518
- """Take a collection of constraints, spit out the resolution result.
519
-
520
- The return value is a representation of the final resolution result. It
521
- is a tuple subclass with three public members:
522
-
523
- * `mapping`: A dict of resolved candidates. Each key is an identifier
524
- of a requirement (as returned by the provider's `identify` method),
525
- and the value is the resolved candidate.
526
- * `graph`: A `DirectedGraph` instance representing the dependency tree.
527
- The vertices are keys of `mapping`, and each edge represents *why*
528
- a particular package is included. A special vertex `None` is
529
- included to represent parents of user-supplied requirements.
530
- * `criteria`: A dict of "criteria" that hold detailed information on
531
- how edges in the graph are derived. Each key is an identifier of a
532
- requirement, and the value is a `Criterion` instance.
533
-
534
- The following exceptions may be raised if a resolution cannot be found:
535
-
536
- * `ResolutionImpossible`: A resolution cannot be found for the given
537
- combination of requirements. The `causes` attribute of the
538
- exception is a list of (requirement, parent), giving the
539
- requirements that could not be satisfied.
540
- * `ResolutionTooDeep`: The dependency tree is too deeply nested and
541
- the resolver gave up. This is usually caused by a circular
542
- dependency, but you can try to resolve this by increasing the
543
- `max_rounds` argument.
544
- """
545
- resolution = Resolution(self.provider, self.reporter)
546
- state = resolution.resolve(requirements, max_rounds=max_rounds)
547
- return _build_result(state)
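
For reference, the deleted resolvers.py module above is normally driven through the Resolver wrapper at the end of the file. Below is a minimal sketch; the toy provider is hypothetical, and only its method names follow resolvelib's AbstractProvider interface, everything else is an assumption for illustration.

    # Minimal sketch: resolve "a", which depends on "b", with the vendored resolvelib.
    # The provider below is a made-up toy; only the method names are real.
    from pip._vendor.resolvelib import AbstractProvider, BaseReporter, Resolver

    class ToyProvider(AbstractProvider):
        """Every requirement is a name that matches exactly one candidate."""

        def __init__(self, universe):
            self.universe = universe  # name -> list of dependency names

        def identify(self, requirement_or_candidate):
            return requirement_or_candidate

        def get_preference(self, identifier, resolutions, candidates,
                           information, backtrack_causes):
            return identifier  # resolve in alphabetical order

        def find_matches(self, identifier, requirements, incompatibilities):
            banned = set(incompatibilities[identifier])
            return [identifier] if identifier not in banned else []

        def is_satisfied_by(self, requirement, candidate):
            return requirement == candidate

        def get_dependencies(self, candidate):
            return self.universe[candidate]

    provider = ToyProvider({"a": ["b"], "b": []})
    result = Resolver(provider, BaseReporter()).resolve(["a"])
    print(sorted(result.mapping))  # ['a', 'b']
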
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py DELETED
@@ -1,51 +0,0 @@
1
- import re
2
-
3
-
4
- SPLIT_RE = re.compile(r'[\.\[\]]+')
5
-
6
-
7
- class JsonSchemaException(ValueError):
8
- """
9
- Base exception of ``fastjsonschema`` library.
10
- """
11
-
12
-
13
- class JsonSchemaValueException(JsonSchemaException):
14
- """
15
- Exception raised by validation function. Available properties:
16
-
17
- * ``message`` containing human-readable information about what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``),
18
- * invalid ``value`` (e.g. ``60``),
19
- * ``name`` of a path in the data structure (e.g. ``data.property[index]``),
20
- * ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``),
21
- * the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``),
22
- * ``rule`` which the ``value`` is breaking (e.g. ``maximum``)
23
- * and ``rule_definition`` (e.g. ``42``).
24
-
25
- .. versionchanged:: 2.14.0
26
- Added all extra properties.
27
- """
28
-
29
- def __init__(self, message, value=None, name=None, definition=None, rule=None):
30
- super().__init__(message)
31
- self.message = message
32
- self.value = value
33
- self.name = name
34
- self.definition = definition
35
- self.rule = rule
36
-
37
- @property
38
- def path(self):
39
- return [item for item in SPLIT_RE.split(self.name) if item != '']
40
-
41
- @property
42
- def rule_definition(self):
43
- if not self.rule or not self.definition:
44
- return None
45
- return self.definition.get(self.rule)
46
-
47
-
48
- class JsonSchemaDefinitionException(JsonSchemaException):
49
- """
50
- Exception raised by generator of validation function.
51
- """
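
This vendored module mirrors the exception classes of the upstream fastjsonschema package. A small sketch of how they are typically caught follows; it assumes the upstream package is installed, and the schema and data values are made up.

    import fastjsonschema

    validate = fastjsonschema.compile(
        {"type": "object", "properties": {"n": {"type": "number", "maximum": 42}}}
    )
    try:
        validate({"n": 60})
    except fastjsonschema.JsonSchemaValueException as exc:
        print(exc.message)           # data.n must be smaller than or equal to 42
        print(exc.path, exc.rule)    # ['data', 'n'] maximum
        print(exc.rule_definition)   # 42
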
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/text/load_text_token.py DELETED
@@ -1,80 +0,0 @@
1
- import torch
2
-
3
-
4
- class LoadTextTokens(object):
5
- def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):
6
- self.tokenizer = tokenizer
7
- self.max_text_len = max_text_len
8
- self.padding = padding
9
-
10
- def descriptions_to_text_tokens(self, target, begin_token):
11
- target_encoding = self.tokenizer(
12
- target, padding=self.padding,
13
- add_special_tokens=False,
14
- truncation=True, max_length=self.max_text_len)
15
-
16
- need_predict = [1] * len(target_encoding['input_ids'])
17
- payload = target_encoding['input_ids']
18
- if len(payload) > self.max_text_len - 2:
19
- payload = payload[-(self.max_text_len - 2):]
20
- need_predict = payload[-(self.max_text_len - 2):]
21
-
22
- input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]
23
-
24
- need_predict = [0] + need_predict + [1]
25
- data = {
26
- 'text_tokens': torch.tensor(input_ids),
27
- 'text_lengths': len(input_ids),
28
- 'need_predict': torch.tensor(need_predict),
29
- }
30
-
31
- return data
32
-
33
- def __call__(self, object_descriptions, box_features, begin_token):
34
- text_tokens = []
35
- text_lengths = []
36
- need_predict = []
37
- for description in object_descriptions:
38
- tokens = self.descriptions_to_text_tokens(description, begin_token)
39
- text_tokens.append(tokens['text_tokens'])
40
- text_lengths.append(tokens['text_lengths'])
41
- need_predict.append(tokens['need_predict'])
42
-
43
- text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)
44
- text_lengths = torch.tensor(text_lengths).to(box_features.device)
45
- need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)
46
-
47
- assert text_tokens.dim() == 2 and need_predict.dim() == 2
48
- data = {'text_tokens': text_tokens,
49
- 'text_lengths': text_lengths,
50
- 'need_predict': need_predict}
51
-
52
- return data
53
-
54
- def collate(self, batch):
55
- if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:
56
- if not all(b.shape == batch[0].shape for b in batch[1:]):
57
- assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])
58
- shape = torch.tensor([b.shape for b in batch])
59
- max_shape = tuple(shape.max(dim=0)[0].tolist())
60
- batch2 = []
61
- for b in batch:
62
- if any(c < m for c, m in zip(b.shape, max_shape)):
63
- b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)
64
- if b.dim() == 1:
65
- b2[:b.shape[0]] = b
66
- elif b.dim() == 2:
67
- b2[:b.shape[0], :b.shape[1]] = b
68
- elif b.dim() == 3:
69
- b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b
70
- else:
71
- raise NotImplementedError
72
- b = b2
73
- batch2.append(b[None, ...])
74
- else:
75
- batch2 = []
76
- for b in batch:
77
- batch2.append(b[None, ...])
78
- return batch2
79
- else:
80
- raise NotImplementedError
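
A rough usage sketch, assuming the LoadTextTokens class above is importable; the tokenizer checkpoint and the dummy ROI features are illustrative assumptions.

    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    loader = LoadTextTokens(tokenizer, max_text_len=40, padding='do_not_pad')

    box_features = torch.zeros(2, 256)  # stand-in for per-object ROI features
    descriptions = ["a red car", "a dog on the grass"]
    batch = loader(descriptions, box_features, begin_token=tokenizer.cls_token_id)
    print(batch["text_tokens"].shape, batch["text_lengths"], batch["need_predict"].shape)
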
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py DELETED
@@ -1,72 +0,0 @@
1
- import detectron2.data.transforms as T
2
- from detectron2.config.lazy import LazyCall as L
3
- from detectron2.layers.batch_norm import NaiveSyncBatchNorm
4
- from detectron2.solver import WarmupParamScheduler
5
- from fvcore.common.param_scheduler import MultiStepParamScheduler
6
-
7
- from ..common.data.coco import dataloader
8
- from ..common.models.mask_rcnn_fpn import model
9
- from ..common.optim import SGD as optimizer
10
- from ..common.train import train
11
-
12
- # train from scratch
13
- train.init_checkpoint = ""
14
- train.amp.enabled = True
15
- train.ddp.fp16_compression = True
16
- model.backbone.bottom_up.freeze_at = 0
17
-
18
- # SyncBN
19
- # fmt: off
20
- model.backbone.bottom_up.stem.norm = \
21
- model.backbone.bottom_up.stages.norm = \
22
- model.backbone.norm = "SyncBN"
23
-
24
- # Using NaiveSyncBatchNorm because heads may have empty input. That is not supported by
25
- # torch.nn.SyncBatchNorm. We can remove this after
26
- # https://github.com/pytorch/pytorch/issues/36530 is fixed.
27
- model.roi_heads.box_head.conv_norm = \
28
- model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
29
- stats_mode="N")
30
- # fmt: on
31
-
32
- # 2conv in RPN:
33
- # https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
34
- model.proposal_generator.head.conv_dims = [-1, -1]
35
-
36
- # 4conv1fc box head
37
- model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
38
- model.roi_heads.box_head.fc_dims = [1024]
39
-
40
- # resize_and_crop_image in:
41
- # https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
42
- image_size = 1024
43
- dataloader.train.mapper.augmentations = [
44
- L(T.ResizeScale)(
45
- min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
46
- ),
47
- L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
48
- L(T.RandomFlip)(horizontal=True),
49
- ]
50
-
51
- # recompute boxes due to cropping
52
- dataloader.train.mapper.recompute_boxes = True
53
-
54
- # larger batch-size.
55
- dataloader.train.total_batch_size = 64
56
-
57
- # Equivalent to 100 epochs.
58
- # 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
59
- train.max_iter = 184375
60
-
61
- lr_multiplier = L(WarmupParamScheduler)(
62
- scheduler=L(MultiStepParamScheduler)(
63
- values=[1.0, 0.1, 0.01],
64
- milestones=[163889, 177546],
65
- num_updates=train.max_iter,
66
- ),
67
- warmup_length=500 / train.max_iter,
68
- warmup_factor=0.067,
69
- )
70
-
71
- optimizer.lr = 0.1
72
- optimizer.weight_decay = 4e-5
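
A sketch of how a detectron2 lazy config like this one is usually loaded and instantiated; the path below is an assumption pointing at this repository's copy of the file.

    from detectron2.config import LazyConfig, instantiate

    cfg = LazyConfig.load(
        "models/grit_src/third_party/CenterNet2/configs/new_baselines/"
        "mask_rcnn_R_50_FPN_100ep_LSJ.py"
    )
    model = instantiate(cfg.model)            # Mask R-CNN with the SyncBN tweaks above
    cfg.optimizer.params.model = model        # the SGD config needs the model to pick params
    optimizer = instantiate(cfg.optimizer)
    print(cfg.train.max_iter, cfg.dataloader.train.total_batch_size)  # 184375 64
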
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dla.py DELETED
@@ -1,479 +0,0 @@
1
- import numpy as np
2
- import math
3
- from os.path import join
4
- import fvcore.nn.weight_init as weight_init
5
- import torch
6
- import torch.nn.functional as F
7
- from torch import nn
8
- import torch.utils.model_zoo as model_zoo
9
-
10
- from detectron2.modeling.backbone.resnet import (
11
- BasicStem, BottleneckBlock, DeformBottleneckBlock)
12
- from detectron2.layers import (
13
- Conv2d,
14
- DeformConv,
15
- FrozenBatchNorm2d,
16
- ModulatedDeformConv,
17
- ShapeSpec,
18
- get_norm,
19
- )
20
-
21
- from detectron2.modeling.backbone.backbone import Backbone
22
- from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
23
- from detectron2.modeling.backbone.fpn import FPN
24
-
25
- __all__ = [
26
- "BottleneckBlock",
27
- "DeformBottleneckBlock",
28
- "BasicStem",
29
- ]
30
-
31
- DCNV1 = False
32
-
33
- HASH = {
34
- 34: 'ba72cf86',
35
- 60: '24839fc4',
36
- }
37
-
38
- def get_model_url(data, name, hash):
39
- return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
40
-
41
- class BasicBlock(nn.Module):
42
- def __init__(self, inplanes, planes, stride=1, dilation=1, norm='BN'):
43
- super(BasicBlock, self).__init__()
44
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
45
- stride=stride, padding=dilation,
46
- bias=False, dilation=dilation)
47
- self.bn1 = get_norm(norm, planes)
48
- self.relu = nn.ReLU(inplace=True)
49
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
50
- stride=1, padding=dilation,
51
- bias=False, dilation=dilation)
52
- self.bn2 = get_norm(norm, planes)
53
- self.stride = stride
54
-
55
- def forward(self, x, residual=None):
56
- if residual is None:
57
- residual = x
58
-
59
- out = self.conv1(x)
60
- out = self.bn1(out)
61
- out = self.relu(out)
62
-
63
- out = self.conv2(out)
64
- out = self.bn2(out)
65
-
66
- out += residual
67
- out = self.relu(out)
68
-
69
- return out
70
-
71
- class Bottleneck(nn.Module):
72
- expansion = 2
73
-
74
- def __init__(self, inplanes, planes, stride=1, dilation=1, norm='BN'):
75
- super(Bottleneck, self).__init__()
76
- expansion = Bottleneck.expansion
77
- bottle_planes = planes // expansion
78
- self.conv1 = nn.Conv2d(inplanes, bottle_planes,
79
- kernel_size=1, bias=False)
80
- self.bn1 = get_norm(norm, bottle_planes)
81
- self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
82
- stride=stride, padding=dilation,
83
- bias=False, dilation=dilation)
84
- self.bn2 = get_norm(norm, bottle_planes)
85
- self.conv3 = nn.Conv2d(bottle_planes, planes,
86
- kernel_size=1, bias=False)
87
- self.bn3 = get_norm(norm, planes)
88
- self.relu = nn.ReLU(inplace=True)
89
- self.stride = stride
90
-
91
- def forward(self, x, residual=None):
92
- if residual is None:
93
- residual = x
94
-
95
- out = self.conv1(x)
96
- out = self.bn1(out)
97
- out = self.relu(out)
98
-
99
- out = self.conv2(out)
100
- out = self.bn2(out)
101
- out = self.relu(out)
102
-
103
- out = self.conv3(out)
104
- out = self.bn3(out)
105
-
106
- out += residual
107
- out = self.relu(out)
108
-
109
- return out
110
-
111
- class Root(nn.Module):
112
- def __init__(self, in_channels, out_channels, kernel_size, residual, norm='BN'):
113
- super(Root, self).__init__()
114
- self.conv = nn.Conv2d(
115
- in_channels, out_channels, 1,
116
- stride=1, bias=False, padding=(kernel_size - 1) // 2)
117
- self.bn = get_norm(norm, out_channels)
118
- self.relu = nn.ReLU(inplace=True)
119
- self.residual = residual
120
-
121
- def forward(self, *x):
122
- children = x
123
- x = self.conv(torch.cat(x, 1))
124
- x = self.bn(x)
125
- if self.residual:
126
- x += children[0]
127
- x = self.relu(x)
128
-
129
- return x
130
-
131
-
132
- class Tree(nn.Module):
133
- def __init__(self, levels, block, in_channels, out_channels, stride=1,
134
- level_root=False, root_dim=0, root_kernel_size=1,
135
- dilation=1, root_residual=False, norm='BN'):
136
- super(Tree, self).__init__()
137
- if root_dim == 0:
138
- root_dim = 2 * out_channels
139
- if level_root:
140
- root_dim += in_channels
141
- if levels == 1:
142
- self.tree1 = block(in_channels, out_channels, stride,
143
- dilation=dilation, norm=norm)
144
- self.tree2 = block(out_channels, out_channels, 1,
145
- dilation=dilation, norm=norm)
146
- else:
147
- self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
148
- stride, root_dim=0,
149
- root_kernel_size=root_kernel_size,
150
- dilation=dilation, root_residual=root_residual,
151
- norm=norm)
152
- self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
153
- root_dim=root_dim + out_channels,
154
- root_kernel_size=root_kernel_size,
155
- dilation=dilation, root_residual=root_residual,
156
- norm=norm)
157
- if levels == 1:
158
- self.root = Root(root_dim, out_channels, root_kernel_size,
159
- root_residual, norm=norm)
160
- self.level_root = level_root
161
- self.root_dim = root_dim
162
- self.downsample = None
163
- self.project = None
164
- self.levels = levels
165
- if stride > 1:
166
- self.downsample = nn.MaxPool2d(stride, stride=stride)
167
- if in_channels != out_channels:
168
- self.project = nn.Sequential(
169
- nn.Conv2d(in_channels, out_channels,
170
- kernel_size=1, stride=1, bias=False),
171
- get_norm(norm, out_channels)
172
- )
173
-
174
- def forward(self, x, residual=None, children=None):
175
- children = [] if children is None else children
176
- bottom = self.downsample(x) if self.downsample else x
177
- residual = self.project(bottom) if self.project else bottom
178
- if self.level_root:
179
- children.append(bottom)
180
- x1 = self.tree1(x, residual)
181
- if self.levels == 1:
182
- x2 = self.tree2(x1)
183
- x = self.root(x2, x1, *children)
184
- else:
185
- children.append(x1)
186
- x = self.tree2(x1, children=children)
187
- return x
188
-
189
- class DLA(nn.Module):
190
- def __init__(self, num_layers, levels, channels,
191
- block=BasicBlock, residual_root=False, norm='BN'):
192
- """
193
- Args:
194
- """
195
- super(DLA, self).__init__()
196
- self.norm = norm
197
- self.channels = channels
198
- self.base_layer = nn.Sequential(
199
- nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
200
- padding=3, bias=False),
201
- get_norm(self.norm, channels[0]),
202
- nn.ReLU(inplace=True))
203
- self.level0 = self._make_conv_level(
204
- channels[0], channels[0], levels[0])
205
- self.level1 = self._make_conv_level(
206
- channels[0], channels[1], levels[1], stride=2)
207
- self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
208
- level_root=False,
209
- root_residual=residual_root, norm=norm)
210
- self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
211
- level_root=True, root_residual=residual_root,
212
- norm=norm)
213
- self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
214
- level_root=True, root_residual=residual_root,
215
- norm=norm)
216
- self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
217
- level_root=True, root_residual=residual_root,
218
- norm=norm)
219
- self.load_pretrained_model(
220
- data='imagenet', name='dla{}'.format(num_layers),
221
- hash=HASH[num_layers])
222
-
223
- def load_pretrained_model(self, data, name, hash):
224
- model_url = get_model_url(data, name, hash)
225
- model_weights = model_zoo.load_url(model_url)
226
- num_classes = len(model_weights[list(model_weights.keys())[-1]])
227
- self.fc = nn.Conv2d(
228
- self.channels[-1], num_classes,
229
- kernel_size=1, stride=1, padding=0, bias=True)
230
- print('Loading pretrained')
231
- self.load_state_dict(model_weights, strict=False)
232
-
233
- def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
234
- modules = []
235
- for i in range(convs):
236
- modules.extend([
237
- nn.Conv2d(inplanes, planes, kernel_size=3,
238
- stride=stride if i == 0 else 1,
239
- padding=dilation, bias=False, dilation=dilation),
240
- get_norm(self.norm, planes),
241
- nn.ReLU(inplace=True)])
242
- inplanes = planes
243
- return nn.Sequential(*modules)
244
-
245
- def forward(self, x):
246
- y = []
247
- x = self.base_layer(x)
248
- for i in range(6):
249
- x = getattr(self, 'level{}'.format(i))(x)
250
- y.append(x)
251
- return y
252
-
253
-
254
- def fill_up_weights(up):
255
- w = up.weight.data
256
- f = math.ceil(w.size(2) / 2)
257
- c = (2 * f - 1 - f % 2) / (2. * f)
258
- for i in range(w.size(2)):
259
- for j in range(w.size(3)):
260
- w[0, 0, i, j] = \
261
- (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
262
- for c in range(1, w.size(0)):
263
- w[c, 0, :, :] = w[0, 0, :, :]
264
-
265
-
266
- class _DeformConv(nn.Module):
267
- def __init__(self, chi, cho, norm='BN'):
268
- super(_DeformConv, self).__init__()
269
- self.actf = nn.Sequential(
270
- get_norm(norm, cho),
271
- nn.ReLU(inplace=True)
272
- )
273
- if DCNV1:
274
- self.offset = Conv2d(
275
- chi, 18, kernel_size=3, stride=1,
276
- padding=1, dilation=1)
277
- self.conv = DeformConv(
278
- chi, cho, kernel_size=(3,3), stride=1, padding=1,
279
- dilation=1, deformable_groups=1)
280
- else:
281
- self.offset = Conv2d(
282
- chi, 27, kernel_size=3, stride=1,
283
- padding=1, dilation=1)
284
- self.conv = ModulatedDeformConv(
285
- chi, cho, kernel_size=3, stride=1, padding=1,
286
- dilation=1, deformable_groups=1)
287
- nn.init.constant_(self.offset.weight, 0)
288
- nn.init.constant_(self.offset.bias, 0)
289
-
290
- def forward(self, x):
291
- if DCNV1:
292
- offset = self.offset(x)
293
- x = self.conv(x, offset)
294
- else:
295
- offset_mask = self.offset(x)
296
- offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
297
- offset = torch.cat((offset_x, offset_y), dim=1)
298
- mask = mask.sigmoid()
299
- x = self.conv(x, offset, mask)
300
- x = self.actf(x)
301
- return x
302
-
303
-
304
- class IDAUp(nn.Module):
305
- def __init__(self, o, channels, up_f, norm='BN'):
306
- super(IDAUp, self).__init__()
307
- for i in range(1, len(channels)):
308
- c = channels[i]
309
- f = int(up_f[i])
310
- proj = _DeformConv(c, o, norm=norm)
311
- node = _DeformConv(o, o, norm=norm)
312
-
313
- up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
314
- padding=f // 2, output_padding=0,
315
- groups=o, bias=False)
316
- fill_up_weights(up)
317
-
318
- setattr(self, 'proj_' + str(i), proj)
319
- setattr(self, 'up_' + str(i), up)
320
- setattr(self, 'node_' + str(i), node)
321
-
322
-
323
- def forward(self, layers, startp, endp):
324
- for i in range(startp + 1, endp):
325
- upsample = getattr(self, 'up_' + str(i - startp))
326
- project = getattr(self, 'proj_' + str(i - startp))
327
- layers[i] = upsample(project(layers[i]))
328
- node = getattr(self, 'node_' + str(i - startp))
329
- layers[i] = node(layers[i] + layers[i - 1])
330
-
331
-
332
- class DLAUp(nn.Module):
333
- def __init__(self, startp, channels, scales, in_channels=None, norm='BN'):
334
- super(DLAUp, self).__init__()
335
- self.startp = startp
336
- if in_channels is None:
337
- in_channels = channels
338
- self.channels = channels
339
- channels = list(channels)
340
- scales = np.array(scales, dtype=int)
341
- for i in range(len(channels) - 1):
342
- j = -i - 2
343
- setattr(self, 'ida_{}'.format(i),
344
- IDAUp(channels[j], in_channels[j:],
345
- scales[j:] // scales[j], norm=norm))
346
- scales[j + 1:] = scales[j]
347
- in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
348
-
349
- def forward(self, layers):
350
- out = [layers[-1]] # start with 32
351
- for i in range(len(layers) - self.startp - 1):
352
- ida = getattr(self, 'ida_{}'.format(i))
353
- ida(layers, len(layers) -i - 2, len(layers))
354
- out.insert(0, layers[-1])
355
- return out
356
-
357
- DLA_CONFIGS = {
358
- 34: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], BasicBlock),
359
- 60: ([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024], Bottleneck)
360
- }
361
-
362
-
363
- class DLASeg(Backbone):
364
- def __init__(self, num_layers, out_features, use_dla_up=True,
365
- ms_output=False, norm='BN'):
366
- super(DLASeg, self).__init__()
367
- # depth = 34
368
- levels, channels, Block = DLA_CONFIGS[num_layers]
369
- self.base = DLA(num_layers=num_layers,
370
- levels=levels, channels=channels, block=Block, norm=norm)
371
- down_ratio = 4
372
- self.first_level = int(np.log2(down_ratio))
373
- self.ms_output = ms_output
374
- self.last_level = 5 if not self.ms_output else 6
375
- channels = self.base.channels
376
- scales = [2 ** i for i in range(len(channels[self.first_level:]))]
377
- self.use_dla_up = use_dla_up
378
- if self.use_dla_up:
379
- self.dla_up = DLAUp(
380
- self.first_level, channels[self.first_level:], scales,
381
- norm=norm)
382
- out_channel = channels[self.first_level]
383
- if not self.ms_output: # stride 4 DLA
384
- self.ida_up = IDAUp(
385
- out_channel, channels[self.first_level:self.last_level],
386
- [2 ** i for i in range(self.last_level - self.first_level)],
387
- norm=norm)
388
- self._out_features = out_features
389
- self._out_feature_channels = {
390
- 'dla{}'.format(i): channels[i] for i in range(6)}
391
- self._out_feature_strides = {
392
- 'dla{}'.format(i): 2 ** i for i in range(6)}
393
- self._size_divisibility = 32
394
-
395
- @property
396
- def size_divisibility(self):
397
- return self._size_divisibility
398
-
399
- def forward(self, x):
400
- x = self.base(x)
401
- if self.use_dla_up:
402
- x = self.dla_up(x)
403
- if not self.ms_output: # stride 4 dla
404
- y = []
405
- for i in range(self.last_level - self.first_level):
406
- y.append(x[i].clone())
407
- self.ida_up(y, 0, len(y))
408
- ret = {}
409
- for i in range(self.last_level - self.first_level):
410
- out_feature = 'dla{}'.format(i)
411
- if out_feature in self._out_features:
412
- ret[out_feature] = y[i]
413
- else:
414
- ret = {}
415
- st = self.first_level if self.use_dla_up else 0
416
- for i in range(self.last_level - st):
417
- out_feature = 'dla{}'.format(i + st)
418
- if out_feature in self._out_features:
419
- ret[out_feature] = x[i]
420
-
421
- return ret
422
-
423
-
424
- @BACKBONE_REGISTRY.register()
425
- def build_dla_backbone(cfg, input_shape):
426
- """
427
- Create a ResNet instance from config.
428
-
429
- Returns:
430
- ResNet: a :class:`ResNet` instance.
431
- """
432
- return DLASeg(
433
- out_features=cfg.MODEL.DLA.OUT_FEATURES,
434
- num_layers=cfg.MODEL.DLA.NUM_LAYERS,
435
- use_dla_up=cfg.MODEL.DLA.USE_DLA_UP,
436
- ms_output=cfg.MODEL.DLA.MS_OUTPUT,
437
- norm=cfg.MODEL.DLA.NORM)
438
-
439
- class LastLevelP6P7(nn.Module):
440
- """
441
- This module is used in RetinaNet to generate extra layers, P6 and P7 from
442
- C5 feature.
443
- """
444
-
445
- def __init__(self, in_channels, out_channels):
446
- super().__init__()
447
- self.num_levels = 2
448
- self.in_feature = "dla5"
449
- self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
450
- self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
451
- for module in [self.p6, self.p7]:
452
- weight_init.c2_xavier_fill(module)
453
-
454
- def forward(self, c5):
455
- p6 = self.p6(c5)
456
- p7 = self.p7(F.relu(p6))
457
- return [p6, p7]
458
-
459
- @BACKBONE_REGISTRY.register()
460
- def build_retinanet_dla_fpn_backbone(cfg, input_shape: ShapeSpec):
461
- """
462
- Args:
463
- cfg: a detectron2 CfgNode
464
- Returns:
465
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
466
- """
467
- bottom_up = build_dla_backbone(cfg, input_shape)
468
- in_features = cfg.MODEL.FPN.IN_FEATURES
469
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
470
- in_channels_p6p7 = bottom_up.output_shape()['dla5'].channels
471
- backbone = FPN(
472
- bottom_up=bottom_up,
473
- in_features=in_features,
474
- out_channels=out_channels,
475
- norm=cfg.MODEL.FPN.NORM,
476
- top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
477
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
478
- )
479
- return backbone
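
A rough sketch of exercising DLASeg directly (the config-driven path goes through build_dla_backbone). Note that the DLA base downloads ImageNet weights from dl.yf.io at construction time and the deformable convolutions need a CUDA build of detectron2, so this is illustrative rather than a drop-in test; the input size is an assumption.

    import torch

    backbone = DLASeg(num_layers=34, out_features=["dla2"]).cuda()
    images = torch.randn(1, 3, 512, 512, device="cuda")  # H and W divisible by 32
    features = backbone(images)
    print({k: tuple(v.shape) for k, v in features.items()})  # {'dla2': (1, 64, 128, 128)}
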
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_123812KB .py DELETED
@@ -1,118 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from torch import nn
4
-
5
- from . import spec_utils
6
-
7
-
8
- class Conv2DBNActiv(nn.Module):
9
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
10
- super(Conv2DBNActiv, self).__init__()
11
- self.conv = nn.Sequential(
12
- nn.Conv2d(
13
- nin,
14
- nout,
15
- kernel_size=ksize,
16
- stride=stride,
17
- padding=pad,
18
- dilation=dilation,
19
- bias=False,
20
- ),
21
- nn.BatchNorm2d(nout),
22
- activ(),
23
- )
24
-
25
- def __call__(self, x):
26
- return self.conv(x)
27
-
28
-
29
- class SeperableConv2DBNActiv(nn.Module):
30
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
31
- super(SeperableConv2DBNActiv, self).__init__()
32
- self.conv = nn.Sequential(
33
- nn.Conv2d(
34
- nin,
35
- nin,
36
- kernel_size=ksize,
37
- stride=stride,
38
- padding=pad,
39
- dilation=dilation,
40
- groups=nin,
41
- bias=False,
42
- ),
43
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
44
- nn.BatchNorm2d(nout),
45
- activ(),
46
- )
47
-
48
- def __call__(self, x):
49
- return self.conv(x)
50
-
51
-
52
- class Encoder(nn.Module):
53
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
54
- super(Encoder, self).__init__()
55
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
56
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
57
-
58
- def __call__(self, x):
59
- skip = self.conv1(x)
60
- h = self.conv2(skip)
61
-
62
- return h, skip
63
-
64
-
65
- class Decoder(nn.Module):
66
- def __init__(
67
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
68
- ):
69
- super(Decoder, self).__init__()
70
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
71
- self.dropout = nn.Dropout2d(0.1) if dropout else None
72
-
73
- def __call__(self, x, skip=None):
74
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
75
- if skip is not None:
76
- skip = spec_utils.crop_center(skip, x)
77
- x = torch.cat([x, skip], dim=1)
78
- h = self.conv(x)
79
-
80
- if self.dropout is not None:
81
- h = self.dropout(h)
82
-
83
- return h
84
-
85
-
86
- class ASPPModule(nn.Module):
87
- def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
88
- super(ASPPModule, self).__init__()
89
- self.conv1 = nn.Sequential(
90
- nn.AdaptiveAvgPool2d((1, None)),
91
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
92
- )
93
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
94
- self.conv3 = SeperableConv2DBNActiv(
95
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
96
- )
97
- self.conv4 = SeperableConv2DBNActiv(
98
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
99
- )
100
- self.conv5 = SeperableConv2DBNActiv(
101
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
102
- )
103
- self.bottleneck = nn.Sequential(
104
- Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
105
- )
106
-
107
- def forward(self, x):
108
- _, _, h, w = x.size()
109
- feat1 = F.interpolate(
110
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
111
- )
112
- feat2 = self.conv2(x)
113
- feat3 = self.conv3(x)
114
- feat4 = self.conv4(x)
115
- feat5 = self.conv5(x)
116
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
117
- bottle = self.bottleneck(out)
118
- return bottle
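
A quick sketch of how these blocks are chained in the UVR5 separation nets (encoder, ASPP bottleneck, decoder with skip connection); the tensor sizes are made up.

    import torch

    enc = Encoder(2, 32, ksize=3, stride=2, pad=1)
    aspp = ASPPModule(32, 64, dilations=(4, 8, 16))
    dec = Decoder(64 + 32, 32, ksize=3, stride=1, pad=1)

    spec = torch.randn(1, 2, 128, 128)  # (batch, channels, freq bins, frames)
    h, skip = enc(spec)                 # h is downsampled 2x, skip keeps full resolution
    h = aspp(h)
    out = dec(h, skip)                  # upsample and fuse with the skip connection
    print(out.shape)                    # torch.Size([1, 32, 128, 128])
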
spaces/Bart92/RVC_HF/lib/infer_pack/models_onnx.py DELETED
@@ -1,819 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from lib.infer_pack import modules
7
- from lib.infer_pack import attentions
8
- from lib.infer_pack import commons
9
- from lib.infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from lib.infer_pack.commons import init_weights
13
- import numpy as np
14
- from lib.infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder768(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(768, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch == None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- stats = self.proj(x) * x_mask
106
-
107
- m, logs = torch.split(stats, self.out_channels, dim=1)
108
- return m, logs, x_mask
109
-
110
-
111
- class ResidualCouplingBlock(nn.Module):
112
- def __init__(
113
- self,
114
- channels,
115
- hidden_channels,
116
- kernel_size,
117
- dilation_rate,
118
- n_layers,
119
- n_flows=4,
120
- gin_channels=0,
121
- ):
122
- super().__init__()
123
- self.channels = channels
124
- self.hidden_channels = hidden_channels
125
- self.kernel_size = kernel_size
126
- self.dilation_rate = dilation_rate
127
- self.n_layers = n_layers
128
- self.n_flows = n_flows
129
- self.gin_channels = gin_channels
130
-
131
- self.flows = nn.ModuleList()
132
- for i in range(n_flows):
133
- self.flows.append(
134
- modules.ResidualCouplingLayer(
135
- channels,
136
- hidden_channels,
137
- kernel_size,
138
- dilation_rate,
139
- n_layers,
140
- gin_channels=gin_channels,
141
- mean_only=True,
142
- )
143
- )
144
- self.flows.append(modules.Flip())
145
-
146
- def forward(self, x, x_mask, g=None, reverse=False):
147
- if not reverse:
148
- for flow in self.flows:
149
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
- else:
151
- for flow in reversed(self.flows):
152
- x = flow(x, x_mask, g=g, reverse=reverse)
153
- return x
154
-
155
- def remove_weight_norm(self):
156
- for i in range(self.n_flows):
157
- self.flows[i * 2].remove_weight_norm()
158
-
159
-
160
- class PosteriorEncoder(nn.Module):
161
- def __init__(
162
- self,
163
- in_channels,
164
- out_channels,
165
- hidden_channels,
166
- kernel_size,
167
- dilation_rate,
168
- n_layers,
169
- gin_channels=0,
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.hidden_channels = hidden_channels
175
- self.kernel_size = kernel_size
176
- self.dilation_rate = dilation_rate
177
- self.n_layers = n_layers
178
- self.gin_channels = gin_channels
179
-
180
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
- self.enc = modules.WN(
182
- hidden_channels,
183
- kernel_size,
184
- dilation_rate,
185
- n_layers,
186
- gin_channels=gin_channels,
187
- )
188
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
-
190
- def forward(self, x, x_lengths, g=None):
191
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
- x.dtype
193
- )
194
- x = self.pre(x) * x_mask
195
- x = self.enc(x, x_mask, g=g)
196
- stats = self.proj(x) * x_mask
197
- m, logs = torch.split(stats, self.out_channels, dim=1)
198
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
- return z, m, logs, x_mask
200
-
201
- def remove_weight_norm(self):
202
- self.enc.remove_weight_norm()
203
-
204
-
205
- class Generator(torch.nn.Module):
206
- def __init__(
207
- self,
208
- initial_channel,
209
- resblock,
210
- resblock_kernel_sizes,
211
- resblock_dilation_sizes,
212
- upsample_rates,
213
- upsample_initial_channel,
214
- upsample_kernel_sizes,
215
- gin_channels=0,
216
- ):
217
- super(Generator, self).__init__()
218
- self.num_kernels = len(resblock_kernel_sizes)
219
- self.num_upsamples = len(upsample_rates)
220
- self.conv_pre = Conv1d(
221
- initial_channel, upsample_initial_channel, 7, 1, padding=3
222
- )
223
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
-
225
- self.ups = nn.ModuleList()
226
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
- self.ups.append(
228
- weight_norm(
229
- ConvTranspose1d(
230
- upsample_initial_channel // (2**i),
231
- upsample_initial_channel // (2 ** (i + 1)),
232
- k,
233
- u,
234
- padding=(k - u) // 2,
235
- )
236
- )
237
- )
238
-
239
- self.resblocks = nn.ModuleList()
240
- for i in range(len(self.ups)):
241
- ch = upsample_initial_channel // (2 ** (i + 1))
242
- for j, (k, d) in enumerate(
243
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
- ):
245
- self.resblocks.append(resblock(ch, k, d))
246
-
247
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
- self.ups.apply(init_weights)
249
-
250
- if gin_channels != 0:
251
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
-
253
- def forward(self, x, g=None):
254
- x = self.conv_pre(x)
255
- if g is not None:
256
- x = x + self.cond(g)
257
-
258
- for i in range(self.num_upsamples):
259
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
- x = self.ups[i](x)
261
- xs = None
262
- for j in range(self.num_kernels):
263
- if xs is None:
264
- xs = self.resblocks[i * self.num_kernels + j](x)
265
- else:
266
- xs += self.resblocks[i * self.num_kernels + j](x)
267
- x = xs / self.num_kernels
268
- x = F.leaky_relu(x)
269
- x = self.conv_post(x)
270
- x = torch.tanh(x)
271
-
272
- return x
273
-
274
- def remove_weight_norm(self):
275
- for l in self.ups:
276
- remove_weight_norm(l)
277
- for l in self.resblocks:
278
- l.remove_weight_norm()
279
-
280
-
281
- class SineGen(torch.nn.Module):
282
- """Definition of sine generator
283
- SineGen(samp_rate, harmonic_num = 0,
284
- sine_amp = 0.1, noise_std = 0.003,
285
- voiced_threshold = 0,
286
- flag_for_pulse=False)
287
- samp_rate: sampling rate in Hz
288
- harmonic_num: number of harmonic overtones (default 0)
289
- sine_amp: amplitude of sine-waveform (default 0.1)
290
- noise_std: std of Gaussian noise (default 0.003)
291
- voiced_threshold: F0 threshold for U/V classification (default 0)
292
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
293
- Note: when flag_for_pulse is True, the first time step of a voiced
294
- segment is always sin(np.pi) or cos(0)
295
- """
296
-
297
- def __init__(
298
- self,
299
- samp_rate,
300
- harmonic_num=0,
301
- sine_amp=0.1,
302
- noise_std=0.003,
303
- voiced_threshold=0,
304
- flag_for_pulse=False,
305
- ):
306
- super(SineGen, self).__init__()
307
- self.sine_amp = sine_amp
308
- self.noise_std = noise_std
309
- self.harmonic_num = harmonic_num
310
- self.dim = self.harmonic_num + 1
311
- self.sampling_rate = samp_rate
312
- self.voiced_threshold = voiced_threshold
313
-
314
- def _f02uv(self, f0):
315
- # generate uv signal
316
- uv = torch.ones_like(f0)
317
- uv = uv * (f0 > self.voiced_threshold)
318
- return uv
319
-
320
- def forward(self, f0, upp):
321
- """sine_tensor, uv = forward(f0)
322
- input F0: tensor(batchsize=1, length, dim=1)
323
- f0 for unvoiced steps should be 0
324
- output sine_tensor: tensor(batchsize=1, length, dim)
325
- output uv: tensor(batchsize=1, length, 1)
326
- """
327
- with torch.no_grad():
328
- f0 = f0[:, None].transpose(1, 2)
329
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
- # fundamental component
331
- f0_buf[:, :, 0] = f0[:, :, 0]
332
- for idx in np.arange(self.harmonic_num):
333
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
- idx + 2
335
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
- rad_values = (f0_buf / self.sampling_rate) % 1 ### the % 1 means the n_har products cannot be optimized away afterwards
337
- rand_ini = torch.rand(
338
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
- )
340
- rand_ini[:, 0] = 0
341
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 ##### taking % 1 here would make the following cumsum impossible to optimize
343
- tmp_over_one *= upp
344
- tmp_over_one = F.interpolate(
345
- tmp_over_one.transpose(2, 1),
346
- scale_factor=upp,
347
- mode="linear",
348
- align_corners=True,
349
- ).transpose(2, 1)
350
- rad_values = F.interpolate(
351
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
- ).transpose(
353
- 2, 1
354
- ) #######
355
- tmp_over_one %= 1
356
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
- cumsum_shift = torch.zeros_like(rad_values)
358
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
- sine_waves = torch.sin(
360
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
- )
362
- sine_waves = sine_waves * self.sine_amp
363
- uv = self._f02uv(f0)
364
- uv = F.interpolate(
365
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
- ).transpose(2, 1)
367
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
- noise = noise_amp * torch.randn_like(sine_waves)
369
- sine_waves = sine_waves * uv + noise
370
- return sine_waves, uv, noise
371
-
372
-
373
- class SourceModuleHnNSF(torch.nn.Module):
374
- """SourceModule for hn-nsf
375
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
- add_noise_std=0.003, voiced_threshod=0)
377
- sampling_rate: sampling_rate in Hz
378
- harmonic_num: number of harmonic above F0 (default: 0)
379
- sine_amp: amplitude of sine source signal (default: 0.1)
380
- add_noise_std: std of additive Gaussian noise (default: 0.003)
381
- note that amplitude of noise in unvoiced is decided
382
- by sine_amp
383
- voiced_threshold: threshold to set U/V given F0 (default: 0)
384
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
- F0_sampled (batchsize, length, 1)
386
- Sine_source (batchsize, length, 1)
387
- noise_source (batchsize, length 1)
388
- uv (batchsize, length, 1)
389
- """
390
-
391
- def __init__(
392
- self,
393
- sampling_rate,
394
- harmonic_num=0,
395
- sine_amp=0.1,
396
- add_noise_std=0.003,
397
- voiced_threshod=0,
398
- is_half=True,
399
- ):
400
- super(SourceModuleHnNSF, self).__init__()
401
-
402
- self.sine_amp = sine_amp
403
- self.noise_std = add_noise_std
404
- self.is_half = is_half
405
- # to produce sine waveforms
406
- self.l_sin_gen = SineGen(
407
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
- )
409
-
410
- # to merge source harmonics into a single excitation
411
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
- self.l_tanh = torch.nn.Tanh()
413
-
414
- def forward(self, x, upp=None):
415
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
- if self.is_half:
417
- sine_wavs = sine_wavs.half()
418
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
- return sine_merge, None, None # noise, uv
420
-
421
-
422
- class GeneratorNSF(torch.nn.Module):
423
- def __init__(
424
- self,
425
- initial_channel,
426
- resblock,
427
- resblock_kernel_sizes,
428
- resblock_dilation_sizes,
429
- upsample_rates,
430
- upsample_initial_channel,
431
- upsample_kernel_sizes,
432
- gin_channels,
433
- sr,
434
- is_half=False,
435
- ):
436
- super(GeneratorNSF, self).__init__()
437
- self.num_kernels = len(resblock_kernel_sizes)
438
- self.num_upsamples = len(upsample_rates)
439
-
440
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
- self.m_source = SourceModuleHnNSF(
442
- sampling_rate=sr, harmonic_num=0, is_half=is_half
443
- )
444
- self.noise_convs = nn.ModuleList()
445
- self.conv_pre = Conv1d(
446
- initial_channel, upsample_initial_channel, 7, 1, padding=3
447
- )
448
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
-
450
- self.ups = nn.ModuleList()
451
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
- c_cur = upsample_initial_channel // (2 ** (i + 1))
453
- self.ups.append(
454
- weight_norm(
455
- ConvTranspose1d(
456
- upsample_initial_channel // (2**i),
457
- upsample_initial_channel // (2 ** (i + 1)),
458
- k,
459
- u,
460
- padding=(k - u) // 2,
461
- )
462
- )
463
- )
464
- if i + 1 < len(upsample_rates):
465
- stride_f0 = np.prod(upsample_rates[i + 1 :])
466
- self.noise_convs.append(
467
- Conv1d(
468
- 1,
469
- c_cur,
470
- kernel_size=stride_f0 * 2,
471
- stride=stride_f0,
472
- padding=stride_f0 // 2,
473
- )
474
- )
475
- else:
476
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
-
478
- self.resblocks = nn.ModuleList()
479
- for i in range(len(self.ups)):
480
- ch = upsample_initial_channel // (2 ** (i + 1))
481
- for j, (k, d) in enumerate(
482
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
- ):
484
- self.resblocks.append(resblock(ch, k, d))
485
-
486
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
- self.ups.apply(init_weights)
488
-
489
- if gin_channels != 0:
490
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
-
492
- self.upp = np.prod(upsample_rates)
493
-
494
- def forward(self, x, f0, g=None):
495
- har_source, noi_source, uv = self.m_source(f0, self.upp)
496
- har_source = har_source.transpose(1, 2)
497
- x = self.conv_pre(x)
498
- if g is not None:
499
- x = x + self.cond(g)
500
-
501
- for i in range(self.num_upsamples):
502
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
- x = self.ups[i](x)
504
- x_source = self.noise_convs[i](har_source)
505
- x = x + x_source
506
- xs = None
507
- for j in range(self.num_kernels):
508
- if xs is None:
509
- xs = self.resblocks[i * self.num_kernels + j](x)
510
- else:
511
- xs += self.resblocks[i * self.num_kernels + j](x)
512
- x = xs / self.num_kernels
513
- x = F.leaky_relu(x)
514
- x = self.conv_post(x)
515
- x = torch.tanh(x)
516
- return x
517
-
518
- def remove_weight_norm(self):
519
- for l in self.ups:
520
- remove_weight_norm(l)
521
- for l in self.resblocks:
522
- l.remove_weight_norm()
523
-
524
-
525
- sr2sr = {
526
- "32k": 32000,
527
- "40k": 40000,
528
- "48k": 48000,
529
- }
530
-
531
-
532
- class SynthesizerTrnMsNSFsidM(nn.Module):
533
- def __init__(
534
- self,
535
- spec_channels,
536
- segment_size,
537
- inter_channels,
538
- hidden_channels,
539
- filter_channels,
540
- n_heads,
541
- n_layers,
542
- kernel_size,
543
- p_dropout,
544
- resblock,
545
- resblock_kernel_sizes,
546
- resblock_dilation_sizes,
547
- upsample_rates,
548
- upsample_initial_channel,
549
- upsample_kernel_sizes,
550
- spk_embed_dim,
551
- gin_channels,
552
- sr,
553
- version,
554
- **kwargs
555
- ):
556
- super().__init__()
557
- if type(sr) == type("strr"):
558
- sr = sr2sr[sr]
559
- self.spec_channels = spec_channels
560
- self.inter_channels = inter_channels
561
- self.hidden_channels = hidden_channels
562
- self.filter_channels = filter_channels
563
- self.n_heads = n_heads
564
- self.n_layers = n_layers
565
- self.kernel_size = kernel_size
566
- self.p_dropout = p_dropout
567
- self.resblock = resblock
568
- self.resblock_kernel_sizes = resblock_kernel_sizes
569
- self.resblock_dilation_sizes = resblock_dilation_sizes
570
- self.upsample_rates = upsample_rates
571
- self.upsample_initial_channel = upsample_initial_channel
572
- self.upsample_kernel_sizes = upsample_kernel_sizes
573
- self.segment_size = segment_size
574
- self.gin_channels = gin_channels
575
- # self.hop_length = hop_length#
576
- self.spk_embed_dim = spk_embed_dim
577
- if version == "v1":
578
- self.enc_p = TextEncoder256(
579
- inter_channels,
580
- hidden_channels,
581
- filter_channels,
582
- n_heads,
583
- n_layers,
584
- kernel_size,
585
- p_dropout,
586
- )
587
- else:
588
- self.enc_p = TextEncoder768(
589
- inter_channels,
590
- hidden_channels,
591
- filter_channels,
592
- n_heads,
593
- n_layers,
594
- kernel_size,
595
- p_dropout,
596
- )
597
- self.dec = GeneratorNSF(
598
- inter_channels,
599
- resblock,
600
- resblock_kernel_sizes,
601
- resblock_dilation_sizes,
602
- upsample_rates,
603
- upsample_initial_channel,
604
- upsample_kernel_sizes,
605
- gin_channels=gin_channels,
606
- sr=sr,
607
- is_half=kwargs["is_half"],
608
- )
609
- self.enc_q = PosteriorEncoder(
610
- spec_channels,
611
- inter_channels,
612
- hidden_channels,
613
- 5,
614
- 1,
615
- 16,
616
- gin_channels=gin_channels,
617
- )
618
- self.flow = ResidualCouplingBlock(
619
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
620
- )
621
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
622
- self.speaker_map = None
623
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
624
-
625
- def remove_weight_norm(self):
626
- self.dec.remove_weight_norm()
627
- self.flow.remove_weight_norm()
628
- self.enc_q.remove_weight_norm()
629
-
630
- def construct_spkmixmap(self, n_speaker):
631
- self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
632
- for i in range(n_speaker):
633
- self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
634
- self.speaker_map = self.speaker_map.unsqueeze(0)
635
-
636
- def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
637
- if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
638
- g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
639
- g = g * self.speaker_map # [N, S, B, 1, H]
640
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
641
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
642
- else:
643
- g = g.unsqueeze(0)
644
- g = self.emb_g(g).transpose(1, 2)
645
-
646
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
647
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
648
- z = self.flow(z_p, x_mask, g=g, reverse=True)
649
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
650
- return o
651
-
652
-
653
- class MultiPeriodDiscriminator(torch.nn.Module):
654
- def __init__(self, use_spectral_norm=False):
655
- super(MultiPeriodDiscriminator, self).__init__()
656
- periods = [2, 3, 5, 7, 11, 17]
657
- # periods = [3, 5, 7, 11, 17, 23, 37]
658
-
659
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
660
- discs = discs + [
661
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
662
- ]
663
- self.discriminators = nn.ModuleList(discs)
664
-
665
- def forward(self, y, y_hat):
666
- y_d_rs = [] #
667
- y_d_gs = []
668
- fmap_rs = []
669
- fmap_gs = []
670
- for i, d in enumerate(self.discriminators):
671
- y_d_r, fmap_r = d(y)
672
- y_d_g, fmap_g = d(y_hat)
673
- # for j in range(len(fmap_r)):
674
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
675
- y_d_rs.append(y_d_r)
676
- y_d_gs.append(y_d_g)
677
- fmap_rs.append(fmap_r)
678
- fmap_gs.append(fmap_g)
679
-
680
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
681
-
682
-
683
- class MultiPeriodDiscriminatorV2(torch.nn.Module):
684
- def __init__(self, use_spectral_norm=False):
685
- super(MultiPeriodDiscriminatorV2, self).__init__()
686
- # periods = [2, 3, 5, 7, 11, 17]
687
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
688
-
689
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
690
- discs = discs + [
691
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
692
- ]
693
- self.discriminators = nn.ModuleList(discs)
694
-
695
- def forward(self, y, y_hat):
696
- y_d_rs = [] #
697
- y_d_gs = []
698
- fmap_rs = []
699
- fmap_gs = []
700
- for i, d in enumerate(self.discriminators):
701
- y_d_r, fmap_r = d(y)
702
- y_d_g, fmap_g = d(y_hat)
703
- # for j in range(len(fmap_r)):
704
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
705
- y_d_rs.append(y_d_r)
706
- y_d_gs.append(y_d_g)
707
- fmap_rs.append(fmap_r)
708
- fmap_gs.append(fmap_g)
709
-
710
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
711
-
712
-
713
- class DiscriminatorS(torch.nn.Module):
714
- def __init__(self, use_spectral_norm=False):
715
- super(DiscriminatorS, self).__init__()
716
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
717
- self.convs = nn.ModuleList(
718
- [
719
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
720
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
721
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
722
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
723
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
724
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
725
- ]
726
- )
727
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
728
-
729
- def forward(self, x):
730
- fmap = []
731
-
732
- for l in self.convs:
733
- x = l(x)
734
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
735
- fmap.append(x)
736
- x = self.conv_post(x)
737
- fmap.append(x)
738
- x = torch.flatten(x, 1, -1)
739
-
740
- return x, fmap
741
-
742
-
743
- class DiscriminatorP(torch.nn.Module):
744
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
745
- super(DiscriminatorP, self).__init__()
746
- self.period = period
747
- self.use_spectral_norm = use_spectral_norm
748
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
749
- self.convs = nn.ModuleList(
750
- [
751
- norm_f(
752
- Conv2d(
753
- 1,
754
- 32,
755
- (kernel_size, 1),
756
- (stride, 1),
757
- padding=(get_padding(kernel_size, 1), 0),
758
- )
759
- ),
760
- norm_f(
761
- Conv2d(
762
- 32,
763
- 128,
764
- (kernel_size, 1),
765
- (stride, 1),
766
- padding=(get_padding(kernel_size, 1), 0),
767
- )
768
- ),
769
- norm_f(
770
- Conv2d(
771
- 128,
772
- 512,
773
- (kernel_size, 1),
774
- (stride, 1),
775
- padding=(get_padding(kernel_size, 1), 0),
776
- )
777
- ),
778
- norm_f(
779
- Conv2d(
780
- 512,
781
- 1024,
782
- (kernel_size, 1),
783
- (stride, 1),
784
- padding=(get_padding(kernel_size, 1), 0),
785
- )
786
- ),
787
- norm_f(
788
- Conv2d(
789
- 1024,
790
- 1024,
791
- (kernel_size, 1),
792
- 1,
793
- padding=(get_padding(kernel_size, 1), 0),
794
- )
795
- ),
796
- ]
797
- )
798
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
799
-
800
- def forward(self, x):
801
- fmap = []
802
-
803
- # 1d to 2d
804
- b, c, t = x.shape
805
- if t % self.period != 0: # pad first
806
- n_pad = self.period - (t % self.period)
807
- x = F.pad(x, (0, n_pad), "reflect")
808
- t = t + n_pad
809
- x = x.view(b, c, t // self.period, self.period)
810
-
811
- for l in self.convs:
812
- x = l(x)
813
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
814
- fmap.append(x)
815
- x = self.conv_post(x)
816
- fmap.append(x)
817
- x = torch.flatten(x, 1, -1)
818
-
819
- return x, fmap
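
The classes in this deleted file only define the multi-period discriminator stack; nothing in the excerpt exercises it. Below is a minimal usage sketch, not part of the original file: it assumes the module is importable under the hypothetical name `models` and that its own imports (torch, DiscriminatorS, DiscriminatorP, modules) resolve as in the original repository, and the tensor shapes are illustrative only.

# Hedged sketch: running MultiPeriodDiscriminator on dummy real/generated audio.
# `models` is a hypothetical import path for the file shown above; adjust it to
# the actual package layout. Waveform shape is (batch, channels=1, samples).
import torch
from models import MultiPeriodDiscriminator  # hypothetical module name

disc = MultiPeriodDiscriminator(use_spectral_norm=False)
y = torch.randn(2, 1, 16000)       # real waveform batch
y_hat = torch.randn(2, 1, 16000)   # generated waveform batch

y_d_rs, y_d_gs, fmap_rs, fmap_gs = disc(y, y_hat)
# One score tensor and one feature-map list per sub-discriminator:
# DiscriminatorS plus one DiscriminatorP for each period in [2, 3, 5, 7, 11, 17].
print(len(y_d_rs))  # 7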
spaces/Benson/text-generation/Examples/Callbreak Ludo Rummy 29 Amp Juegos De Cartas Solitario Apk Descargar.md DELETED
@@ -1,151 +0,0 @@
1
-
2
- <h1>Callbreak Ludo Rummy 29 & Solitario juegos de cartas APK Descargar</h1>
3
- <p>Si te gusta jugar juegos de cartas y juegos de mesa en su dispositivo móvil, entonces usted debe definitivamente echa un vistazo Callbreak Ludo Rummy 29 & Solitario Juegos de Cartas APK. Esta aplicación es desarrollada por Yarsa Games, un popular estudio de juegos con sede en Nepal. Te ofrece ocho juegos diferentes en un solo paquete. Puedes disfrutar de Callbreak, Ludo, Rummy, 29, Solitaire, Kitti, Dhumbal y Jutpatti en cualquier momento y en cualquier lugar. Ya sea que quieras jugar solo o con tus amigos en línea o fuera de línea puedes divertirte con esta aplicación. En este artículo te mostraremos cómo descargar e instalar la aplicación, así como cómo jugar cada juego en ella. También destacaremos algunas de las características y beneficios de esta aplicación y responderemos algunas preguntas frecuentes sobre ella. </p>
4
- <h2>Cómo descargar e instalar la aplicación</h2>
5
- <p>Descargar e instalar Callbreak Ludo Rummy 29 & Solitario juegos de cartas APK es muy fácil. Usted tiene dos opciones para hacerlo:</p>
6
- <h2>callbreak ludo rummy 29 &amp; juegos de cartas solitario apk descargar</h2><br /><p><b><b>Download Zip</b> &#10003; <a href="https://bltlly.com/2v6MKL">https://bltlly.com/2v6MKL</a></b></p><br /><br />
7
- <ol>
8
- <li>Descárgalo desde Google Play Store. Solo busca "Callbreak Ludo" en la tienda o haz clic en este enlace . Luego toca el botón "Instalar" y espera a que la aplicación se descargue e instale en tu dispositivo. </li>
9
- <li>Descargarlo desde el archivo APK. Si desea descargar el archivo APK directamente puede hacer clic en este enlace . Luego debe habilitar la opción "Fuentes desconocidas" en la configuración del dispositivo para permitir la instalación de aplicaciones desde fuentes distintas de Google Play Store. Después de eso, puede abrir el archivo APK y seguir las instrucciones para instalar la aplicación. </li>
10
- </ol>
11
- <h2>Cómo jugar los juegos en la aplicación</h2>
12
- <h3>Callbreak</h3>
13
- <p>Callbreak es un juego de cartas que es muy popular en Nepal e India. Lo juegan cuatro jugadores en dos equipos. Cada jugador recibe 13 cartas de un mazo de 52 cartas estándar. El juego consta de cinco rondas y cada ronda tiene 13 trucos. El primer repartidor es elegido al azar y luego el turno de repartir gira en el sentido de las agujas del reloj. El repartidor reparte todas las cartas una por una a cada jugador. </p>
14
-
15
- <p>El jugador a la izquierda del repartidor juega la primera carta del primer truco. El palo de esta carta se convierte en el palo de triunfo para esa ronda. Los otros jugadores tienen que seguir el ejemplo si tienen una carta del mismo palo. Si no tienen una carta del mismo palo, pueden jugar cualquier carta de su elección. El jugador que juega la carta más alta del palo de triunfo gana el truco. Si no se juega ninguna carta de triunfo, el jugador que juega la carta más alta del palo que se llevó gana el truco. El ganador de un truco lidera el siguiente truco. </p>
16
- <p>Al final de cada ronda, la puntuación de cada jugador se calcula en función de su oferta y el número de trucos que ganó. Si un jugador gana al menos tantos trucos como su oferta, obtiene una puntuación positiva igual a su oferta. Si un jugador gana menos trucos que su oferta, obtiene una puntuación negativa igual a su oferta. Si un jugador gana más trucos que su oferta, obtiene una puntuación positiva igual a su oferta más 0.1 punto por cada truco adicional. Por ejemplo, si un jugador ofrece 4 y gana 5 trucos, su puntuación es 4 + 0.1 = 4.1 puntos. </p>
17
- <p>El juego termina después de cinco rondas y el equipo con la mayor puntuación total gana. </p>
18
- <h3>Ludo</h3>
19
- <p>Ludo es un clásico juego de mesa que se deriva de un antiguo juego indio llamado Pachisi. Se juega de dos a cuatro jugadores en un tablero con cuatro áreas de color: rojo, verde, amarillo y azul. Cada jugador tiene cuatro fichas de su color que comienzan en su área de origen. El objetivo del juego es mover las cuatro fichas alrededor del tablero y en su área de destino antes que los otros jugadores. </p>
20
- <p>El juego se juega con un solo dado que determina cuántos espacios puede mover un token en el tablero. Cada jugador lanza el dado a su vez y mueve una de sus fichas de acuerdo con el número que se muestra en el dado. Un token solo puede entrar en el tablero si el die muestra un seis o si no hay tokens en el área de inicio. Un token solo puede entrar en el área de destino si completa un circuito completo alrededor de la placa. </p>
21
-
22
- <p>Un jugador puede volver a lanzar el dado si lanza un seis o si captura otro token. Un jugador también puede elegir saltarse su turno si no puede o no quiere mover ninguna de sus fichas. </p>
23
- <p>El juego termina cuando un jugador mueve las cuatro fichas en su área de destino y declara "Ludo!". </p>
24
- <p></p> <h3>Rummy</h3>
25
- <p>Rummy es un juego de cartas que se juega entre dos y seis jugadores con una o dos barajas estándar de 52 cartas. El objetivo del juego es formar conjuntos válidos y secuencias de cartas y deshacerse de todas las cartas en su mano. Un conjunto es un grupo de tres o cuatro cartas del mismo rango, como 7-7-7 o Q-Q-Q-Q-Q. Una secuencia es un grupo de tres o más cartas del mismo palo en orden consecutivo, como 4-5-6 de corazones o 10-J-Q-K de picas. Una carta comodín puede usarse como sustituto de cualquier carta en un conjunto o secuencia. </p>
26
- <p>El juego comienza con cada jugador que recibe 13 cartas por el repartidor, que es elegido al azar. Las cartas restantes se colocan boca abajo en la mesa como la pila de existencias. La carta superior de la pila principal se pone boca arriba y se coloca junto a ella como la pila de descarte. El jugador a la izquierda del repartidor juega primero y luego el turno pasa en el sentido de las agujas del reloj. </p>
27
- <p>En tu turno, tienes que sacar una carta de la pila de reserva o de la pila de descarte y luego desechar una carta de tu mano a la pila de descarte. También puedes declarar tu mano si has formado todos los conjuntos y secuencias necesarios y te queda una carta para descartar. Esto se llama "salir" o "mostrar". </p>
28
- <p>Cuando un jugador sale, la ronda termina y la puntuación de cada jugador se calcula en función del valor de las cartas que quedan en su mano. Las cartas (J, Q, K) tienen un valor de 10 puntos cada una, el as tiene un valor de 1 punto, y las otras cartas tienen un valor igual a su rango. El comodín no tiene valor. El jugador que sale obtiene cero puntos y los otros jugadores obtienen puntos positivos. El jugador con la puntuación total más baja al final del juego gana. </p>
29
- <h3>29</h3>
30
-
31
- <p>Antes del inicio de cada ronda, hay una fase de puja donde cada jugador tiene que hacer una puja, que es el número de puntos que espera que su equipo gane en esa ronda. La oferta puede ser cualquier número de 15 a 28 o "pase". La oferta mínima es de 15 y la oferta máxima es de 28. El jugador a la izquierda del repartidor hace la primera puja y luego la puja va en el sentido de las agujas del reloj. Cada jugador puede pasar o subir la puja por al menos un punto. La puja termina cuando tres jugadores pasan consecutivamente. El último jugador que hizo una oferta se convierte en el declarante y su pareja se convierte en el maniquí. </p>
32
- <p>El declarante tiene que elegir un traje de triunfo para esa ronda entre picas, corazones, diamantes o tréboles. El palo de triunfo tiene una clasificación especial de cartas: J (alta), 9, A, 10, K, Q, 8 (baja). Los otros palos tienen una clasificación normal de cartas: A (alta), K, Q, J, 10, 9, 8 (baja). El declarante también puede elegir "no trump", lo que significa que no hay un traje de triunfo y todos los trajes tienen un ranking normal. </p>
33
- <p>El jugador a la izquierda del repartidor juega la primera carta del primer truco. Los otros jugadores tienen que seguir el ejemplo si tienen una carta del mismo palo. Si no tienen una carta del mismo palo, pueden jugar cualquier carta de su elección. El jugador que juega la carta más alta del palo de triunfo gana el truco. Si no se juega ninguna carta de triunfo, el jugador que juega la carta más alta del palo que se llevó gana el truco. El ganador de un truco lidera el siguiente truco. </p>
34
-
35
- <p>El juego termina después de 28 rondas y el equipo con la mayor puntuación total gana. </p>
36
- <h3>Solitario</h3>
37
- <p>Solitario es un juego de cartas que es jugado por un jugador con una baraja estándar de 52 cartas. El objetivo del juego es ordenar todas las cartas en cuatro montones de acuerdo a sus palos y rangos. El juego tiene siete columnas de cartas en la mesa y cuatro cimientos vacíos en la parte superior. </p>
38
- <p>El juego comienza con 28 cartas que se reparten boca abajo en siete columnas de izquierda a derecha. La primera columna tiene una carta, la segunda columna tiene dos cartas, y así sucesivamente hasta que la séptima columna tiene siete cartas. La carta de la parte superior de cada columna se pone boca arriba. Las 24 cartas restantes se colocan boca abajo en la mesa como una pila. </p>
39
- <p>En tu turno, puedes mover una o más cartas de una columna a otra si forman una secuencia descendente de colores alternados. Por ejemplo, puede mover un 6 rojo y un 5 negro juntos a un 7 rojo. También puede mover una sola carta a una columna vacía. Puede voltear la tarjeta boca abajo de cualquier columna si no hay una tarjeta boca arriba en ella. </p>
40
- <p>También puede mover una o más cartas de una columna a una fundación si forman una secuencia ascendente del mismo palo. Por ejemplo, puedes mover un as de picas sobre una base vacía o un dos de picas sobre un as de picas sobre una base. También puede mover una sola carta de la pila de valores a una fundación si cabe. </p>
41
- <p>Puedes sacar una o tres cartas de la pila de reserva dependiendo de tu preferencia y colocarlas boca arriba en la mesa como la pila de residuos. Usted puede mover la tarjeta superior de la pila de residuos a una columna o una fundación si cabe. También puede reciclar la pila de residuos de nuevo a la pila de existencias cuando está vacío. </p>
42
- <p>El juego termina cuando has movido las 52 cartas a las fundaciones o cuando no te quedan movimientos. </p>
43
- <h3>Kitti</h3>
44
-
45
- <p>El juego comienza con cada jugador siendo repartido siete cartas por el repartidor, que es elegido al azar. Las cartas restantes se colocan boca abajo en la mesa como la pila kitti. La carta superior de la pila de gatitos se pone boca arriba y se coloca junto a ella como la carta abierta. El jugador a la izquierda del repartidor juega primero y luego el turno pasa en el sentido de las agujas del reloj. </p>
46
- <p>En tu turno, tienes que sacar una carta de la pila de gatitos o de la carta abierta y luego descartar una carta de tu mano a la carta abierta. También puede mostrar su mano si ha formado todos los grupos necesarios y le queda una carta para descartar. Esto se llama "salir" o "mostrar". </p>
47
- <p>Cuando un jugador sale, la ronda termina y la puntuación de cada jugador se calcula en función del valor de las cartas que quedan en su mano. Las cartas (J, Q, K) tienen un valor de 10 puntos cada una, el as tiene un valor de 1 punto, y las otras cartas tienen un valor igual a su rango. El comodín no tiene valor. El jugador que sale obtiene cero puntos y los otros jugadores obtienen puntos positivos. El jugador con la puntuación total más baja al final del juego gana. </p>
48
- <h3>Dhumbal</h3>
49
- <p>Dhumbal es un juego de cartas que es jugado por tres a seis jugadores con una baraja estándar de 52 cartas. El objetivo del juego es tener la puntuación más baja al final de cada ronda. El juego tiene 10 rondas y cada ronda tiene una puntuación objetivo diferente que los jugadores tienen que permanecer por debajo. </p>
50
- <p>El juego comienza con cada jugador siendo repartido cuatro cartas por el repartidor, que es elegido al azar. Las cartas restantes se colocan boca abajo en la mesa como la pila de existencias. La carta superior de la pila principal se pone boca arriba y se coloca junto a ella como la pila de descarte. El jugador a la izquierda del repartidor juega primero y luego el turno pasa en el sentido de las agujas del reloj. </p>
51
-
52
- <p>Cuando un jugador sale, la ronda termina y la puntuación de cada jugador se calcula en función del valor de sus cartas. Las cartas (J, Q, K) tienen un valor de 10 puntos cada una, el as tiene un valor de 11 puntos, y las otras cartas tienen un valor igual a su rango. El comodín tiene un valor de cero puntos. El jugador que sale obtiene una puntuación igual a la suma de sus cartas y los otros jugadores obtienen una puntuación igual a la suma de sus cartas más 10 puntos de penalización. Si dos o más jugadores salen en el mismo turno, el jugador con la puntuación más baja gana esa ronda y los otros jugadores obtienen 10 puntos de penalización. </p>
53
- <p>La puntuación objetivo para cada ronda es la siguiente:</p>
54
- <tabla>
55
- <tr><th>Round</th><th>Puntuación del objetivo</th></tr>
56
- <tr><td>1</td><td>7</td></tr>
57
- <tr><td>2</td><td>9</td></tr>
58
- <tr><td>3</td><td>11</td></tr>
59
- <tr><td>4</td><td>13</td></tr>
60
- <tr><td>5</td><td>15</td></tr>
61
- <tr><td>6</td><td>17</td></tr>
62
- <tr><td>7</td><td>19</td></tr>
63
- <tr><td>8</td><td>21</td></tr>
64
- <tr><td>9</td><td>23</td></tr>
65
- <tr><td>10</td><td>25</td></tr>
66
- </tabla>
67
- <p>Si un jugador supera la puntuación objetivo en cualquier ronda, es eliminado del juego. El juego termina después de 10 rondas o cuando solo queda un jugador. El jugador con la puntuación total más baja al final del juego gana. </p>
68
- <h3>Jutpatti</h3>
69
- <p>Jutpatti es un juego de cartas que se juega entre dos y cuatro jugadores con una baraja estándar de 52 cartas. El objetivo del juego es recoger pares de cartas y ser el primero en deshacerse de todas las cartas en su mano. Un par son dos cartas del mismo rango, como 7-7 o Q-Q. Una carta comodín puede ser utilizada como un sustituto de cualquier carta en un par. </p>
70
- <p>El juego comienza con cada jugador siendo repartido cinco cartas por el repartidor, que es elegido al azar. Las cartas restantes se colocan boca abajo en la mesa como la pila de existencias. La carta de la parte superior de la pila se pone boca arriba y se coloca junto a ella como la carta abierta. El jugador a la izquierda del repartidor juega primero y luego el turno pasa en el sentido de las agujas del reloj. </p>
71
-
72
- <p>Cuando un jugador sale, la ronda termina y la puntuación de cada jugador se calcula en función del número de parejas que tienen en su mano. Cada pareja vale un punto y cada comodín vale la mitad de un punto. El jugador que sale obtiene cero puntos y los otros jugadores obtienen puntos negativos. El jugador con la puntuación total más alta al final del juego gana. </p>
73
- <h2>Características y beneficios de la aplicación</h2>
74
- <p>Callbreak Ludo Rummy 29 & Solitario Juegos de Cartas APK no es solo una aplicación ordinaria que le ofrece ocho juegos diferentes en un paquete. También tiene algunas características y beneficios increíbles que lo hacen destacar de otras aplicaciones similares. Estos son algunos de ellos:</p>
75
- <ul>
76
- <li>La aplicación tiene gráficos de alta calidad y efectos de sonido que mejoran su experiencia de juego. </li>
77
- <li>La aplicación tiene un modo sin conexión que le permite jugar sin conexión a Internet. </li>
78
- <li> La aplicación tiene un modo multijugador que le permite jugar con sus amigos en línea o fuera de línea. </li>
79
- <li> La aplicación tiene una opción de chat que le permite comunicarse con otros jugadores durante el juego. </li>
80
- <li>La aplicación tiene tablas de clasificación que muestran su ranking y logros entre otros jugadores. </li>
81
- <li> La aplicación tiene varios ajustes que le permiten personalizar sus preferencias de juego como el idioma, tema, nivel de dificultad, etc.</li>
82
- <li>La aplicación tiene actualizaciones regulares que agregan nuevas características y corrigen errores. </li>
83
- <li>La aplicación es gratuita para descargar y jugar. </li>
84
- </ul>
85
- <h2>Preguntas frecuentes sobre la aplicación</h2>
86
- <p>Si tiene alguna pregunta o duda sobre Callbreak Ludo Rummy 29 & Solitario Juegos de Cartas APK, usted puede encontrar las respuestas aquí. Hemos recopilado algunas de las preguntas más frecuentes sobre la aplicación y sus respuestas:</p>
87
- <ol>
88
- <li>¿Cómo puedo actualizar la aplicación? </li>
89
- <p>Puedes actualizar la aplicación siguiendo estos pasos:</p>
90
- <ul>
91
- <li>Abre Google Play Store en tu dispositivo. </li>
92
- <li>Buscar "Callbreak Ludo" o haga clic en este enlace . </li>
93
- <li>Toque en el botón "Actualizar" y espere a que la aplicación se actualice. </li>
94
- </ul>
95
-
96
- <p>Puede ponerse en contacto con el equipo de soporte siguiendo estos pasos:</p>
97
- <ul>
98
- <li>Abra la aplicación en su dispositivo. </li>
99
- <li>Toque en el icono del menú en la esquina superior izquierda de la pantalla. </li>
100
- <li>Toque en la opción "Ayuda y soporte". </li>
101
- <li>Rellene su nombre, correo electrónico y mensaje y toque en "Enviar" botón. </li>
102
- </ul>
103
- <li>¿Cómo puedo jugar con mis amigos en línea? </li>
104
- <p>Puedes jugar con tus amigos online siguiendo estos pasos:</p>
105
- <ul>
106
- <li>Abra la aplicación en su dispositivo. </li>
107
- <li>Seleccione el juego que desea jugar desde la pantalla de inicio. </li>
108
- <li>Toque en la opción "Multijugador" y luego en la opción "Online". </li>
109
- <li>Crear una habitación o unirse a una habitación existente introduciendo el código de la habitación. </li>
110
- <li>Invita a tus amigos a unirse a la habitación compartiendo el código de la habitación con ellos. </li>
111
- <li>Comienza el juego y disfruta jugando con tus amigos. </li>
112
- </ul>
113
- <li>¿Cómo puedo cambiar el idioma de la aplicación? </li>
114
- <p>Puedes cambiar el idioma de la aplicación siguiendo estos pasos:</p>
115
- <ul>
116
- <li>Abra la aplicación en su dispositivo. </li>
117
- <li>Toque en el icono del menú en la esquina superior izquierda de la pantalla. </li>
118
- <li>Toque en la opción "Configuración". </li>
119
- <li>Toque en la opción "Idioma" y seleccione su idioma preferido de la lista. </li>
120
- </ul> <li>¿Cómo puedo jugar sin conexión? </li>
121
- <p>Puedes jugar sin conexión siguiendo estos pasos:</p>
122
- <ul>
123
- <li>Abra la aplicación en su dispositivo. </li>
124
- <li>Seleccione el juego que desea jugar desde la pantalla de inicio. </li>
125
- <li>Toque en la opción "Multijugador" y luego en la opción "Offline". </li>
126
- <li>Seleccione el número de jugadores y el nivel de dificultad de los jugadores de la computadora. </li>
127
- <li>Comienza el juego y disfruta jugando offline. </li>
128
- </ul>
129
- <h2>Conclusión</h2>
130
-
131
- <p>Entonces, ¿qué estás esperando? Descargar Callbreak Ludo Rummy 29 & Solitario juegos de cartas APK ahora y divertirse jugando! </p>
132
- <h2>Preguntas frecuentes</h2>
133
- <ol>
134
- <li> ¿Cuáles son los requisitos mínimos para ejecutar la aplicación? </li>
135
- <p>La aplicación requiere Android 4.4 o superior y al menos 50 MB de espacio libre en su dispositivo. </p>
136
- <li> ¿Es la aplicación segura para descargar y usar? </li>
137
- <p>Sí, la aplicación es segura de descargar y usar. No contiene ningún virus o malware. Tampoco recopila ninguna información personal o confidencial de usted. </p>
138
- <li>¿Puedo jugar con dinero real en la aplicación? </li>
139
- <p>No, la aplicación no admite transacciones con dinero real o apuestas. Es solo para fines de entretenimiento. </p>
140
- <li>¿Puedo personalizar mi avatar y perfil en la aplicación? </li>
141
- <p>Sí, puedes personalizar tu avatar y perfil en la aplicación. Puedes elegir entre diferentes avatares, fondos, marcos y pegatinas. También puede editar su nombre, país y estado. </p>
142
- <li>¿Cómo puedo dar comentarios o sugerencias para la aplicación? </li>
143
- <p>Puedes dar comentarios o sugerencias para la aplicación siguiendo estos pasos:</p>
144
- <ul>
145
- <li>Abra la aplicación en su dispositivo. </li>
146
- <li>Toque en el icono del menú en la esquina superior izquierda de la pantalla. </li>
147
- <li>Toque en la opción "Feedback". </li>
148
- <li>Rellene su nombre, correo electrónico, calificación y mensaje y toque en "Enviar" botón. </li>
149
- </ul></p> 64aa2da5cf<br />
150
- <br />
151
- <br />
spaces/Benson/text-generation/Examples/Descargar Apk Mod Zombi Caminando 2.md DELETED
@@ -1,94 +0,0 @@
1
-
2
- <h1>Descargar APK Mod Walking Zombie 2: Una guía para los amantes de los zombis</h1>
3
- <p>Si eres un fan de los juegos de zombies, probablemente hayas oído hablar de Walking Zombie 2, un popular shooter en primera persona con elementos RPG. El juego se desarrolla en un mundo post-apocalíptico donde tienes que luchar contra hordas de zombies, bandidos y monstruos jefes. También tienes que completar misiones, mejorar tus habilidades y beneficios, comprar y vender equipos, e interactuar con otros sobrevivientes. </p>
4
- <h2>descargar apk mod zombi caminando 2</h2><br /><p><b><b>Download File</b> &#9733; <a href="https://bltlly.com/2v6KPU">https://bltlly.com/2v6KPU</a></b></p><br /><br />
5
- <p>Walking Zombie 2 es un juego divertido y desafiante que puedes jugar sin conexión a Internet. Sin embargo, si desea mejorar su experiencia de juego, es posible que desee probar la descarga de un mod APK para Walking Zombie 2. En este artículo, explicaremos lo que es un mod APK, cómo instalarlo, qué características ofrece, y algunos consejos y trucos para jugar Walking Zombie 2. Vamos a empezar! </p>
6
- <h2> ¿Qué es un mod APK y cómo instalarlo</h2>
7
- <p>Un mod APK es una versión modificada de una aplicación original o juego que ha sido alterado por alguien para agregar o eliminar algunas características. Por ejemplo, un mod de APK para Walking Zombie 2 podría darte dinero ilimitado, munición, salud u otros beneficios que no están disponibles en el juego original. </p>
8
- <p>Un mod APK generalmente se descarga como un archivo con la extensión . apk que puede instalar en su dispositivo Android. Sin embargo, antes de instalar un mod APK, es necesario asegurarse de que el dispositivo permite instalar aplicaciones de fuentes desconocidas. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y habilite. </p>
9
- <p>Una vez que haya habilitado fuentes desconocidas, puede descargar un mod APK para Walking Zombie 2 desde un sitio web confiable como [APKMB.Com]( 4 ), que ofrece juegos y aplicaciones modificadas para Android de forma gratuita. Después de descargar el archivo, localizarlo en el administrador de archivos de su dispositivo y toque en él para instalarlo. Es posible que necesite permitir que algunos permisos para que la aplicación se ejecute correctamente. </p>
10
- <p></p>
11
- <h3> Cómo descargar APK Mod Walking Zombie 2</h3>
12
-
13
- <ol>
14
- <li>Ir a [APKMB.Com]( 4 ) y buscar Walking Zombie 2 en la barra de búsqueda. </li>
15
- <li>Seleccione la versión del mod que desea descargar. Asegúrese de que es compatible con la versión de Android de su dispositivo. </li>
16
- <li>Haga clic en el botón de descarga y espere a que se descargue el archivo. </li>
17
- <li>Abra el administrador de archivos de su dispositivo y encuentre el archivo descargado. Debería estar en la carpeta Descargas o en una carpeta con el nombre del sitio web. </li>
18
- <li>Toque en el archivo y siga las instrucciones para instalarlo. Es posible que tenga que permitir algunos permisos para que la aplicación se ejecute correctamente. </li>
19
- <li>Inicie la aplicación desde su pantalla de inicio o cajón de aplicaciones y disfrutar jugando Walking Zombie 2 con el mod! </li>
20
- </ol>
21
- <h4>Características de APK Mod Walking Zombie 2</h4>
22
- <p> <p>Dependiendo de la versión del mod que descargues, es posible que obtengas diferentes características y beneficios. Sin embargo, algunas de las características comunes de un mod APK para Walking Zombie 2 son:</p>
23
- <ul>
24
- <li>Dinero ilimitado: Puedes comprar cualquier cosa que quieras en la tienda sin preocuparte por quedarte sin dinero. </li>
25
- <li>Munición ilimitada: Puedes disparar todo lo que quieras sin recargar o quedarse sin balas. </li>
26
- <li>Salud ilimitada: Puedes sobrevivir a cualquier ataque sin perder la salud o morir. </li>
27
- <li>Puntos de habilidad ilimitados: Puedes mejorar tus habilidades y beneficios al nivel máximo sin ganar puntos de experiencia. </li>
28
- <li>Armas y equipos desbloqueados: puedes acceder a todas las armas y equipos del juego sin encontrarlos ni comprarlos. </li>
29
- <li>Sin anuncios: Puedes jugar el juego sin interrupciones o distracciones de los anuncios. </li>
30
- </ul>
31
- <p>Estas características pueden hacer el juego más divertido y fácil para usted, pero también pueden hacer que sea menos desafiante y realista. Por lo tanto, debe utilizar el mod a su propia discreción y riesgo. Algunas personas podrían considerar usar un mod como trampa, mientras que otros podrían disfrutarlo como una forma de explorar las posibilidades del juego. Depende de ti decidir cómo quieres jugar Walking Zombie 2.</p>
32
-
33
- <p>Ya sea que uses un mod o no, Walking Zombie 2 es un juego que requiere un poco de estrategia y habilidad para dominar. Aquí hay algunos consejos y trucos que pueden ayudarle a sobrevivir y disfrutar del juego:</p>
34
- <h3>Cómo subir de nivel tus habilidades y beneficios</h3>
35
- <p>En Walking Zombie 2, puedes mejorar las habilidades de tu personaje nivelando tus habilidades y beneficios. Las habilidades se dividen en cuatro categorías: combate, supervivencia, elaboración y carisma. Cada categoría tiene varias habilidades que afectan diferentes aspectos del juego, tales como daños, precisión, salud, resistencia, velocidad de fabricación, precios de intercambio, etc. Puedes subir de nivel tus habilidades gastando puntos de habilidad que ganas completando misiones y matando enemigos. </p>
36
- <p>Los beneficios son bonos especiales que le dan ventajas adicionales en el juego, tales como mayor probabilidad de golpe crítico, recarga más rápida, mejor saqueo, etc. Puede desbloquear beneficios al alcanzar ciertos niveles de habilidades. Por ejemplo, para desbloquear la ventaja "Headhunter", que aumenta el daño de tu disparo en la cabeza en un 25%, necesitas tener al menos un nivel 5 en la habilidad de combate "Sharpshooter". Solo puedes elegir un beneficio por nivel, así que elige sabiamente. </p>
37
- <p>Algunas de las mejores habilidades y beneficios para invertir son:</p>
38
- <tabla>
39
- <tr><th>Habilidad</th><th>Perk</th><th>Beneficio</th></tr>
40
- <tr><td>Combat > Sharpshooter</td><td>Headhunter</td><td>Aumenta el daño a la cabeza en un 25%</td></tr>
41
- <tr><td>Supervivencia > Dureza</td><td>Sed de sangre</td><td><td>Restaura un 5% de salud por cada muerte</td></tr>
42
- <tr><td>Elaboración > Ingeniero</td><td>Tinkerer</td><td><td>Aumenta la velocidad de fabricación en un 50%</td></tr>
43
- <tr><td>Charisma > Trader</td><td>Bargain Hunter</td><td>Reduce los precios en un 20% en las tiendas</td></tr>
44
- </tabla>
45
- <h4>Cómo encontrar y utilizar las mejores armas y equipos</h4>
46
-
47
- <p>El equipo incluye artículos como armaduras, cascos, guantes, botas, mochilas, etc. que pueden protegerlo de daños y proporcionarle beneficios adicionales como una mayor capacidad de carga, velocidad de movimiento, sigilo, etc. También puede actualizar su equipo agregando mods como placas, almohadillas, bolsillos, etc.</p>
48
- <p>Puedes encontrar armas y equipos saqueando los cadáveres, cofres, cajas, casilleros, etc. de los enemigos o comprándolos en tiendas o comerciantes. Sin embargo, algunas de las mejores armas y equipos están ocultos en lugares secretos que requieren un poco de exploración y resolución de rompecabezas para acceder. Por ejemplo, puedes encontrar un poderoso rifle de francotirador llamado "El Segador" en un búnker oculto cerca de la ciudad de Silver City.</p>
49
- <p>Algunas de las mejores armas y equipos para usar son:</p>
50
- <tabla>
51
- <tr><th>Tipo</th><th>Nombre</th><th>Descripción</th></tr>
52
- <tr><td>Melee</td><td>Katana</td><td>Una espada afilada que puede cortar zombies con facilidad. </td></tr>
53
- <tr><td>Pistola</td><td>Desert Eagle</td><td>Un arma de mano potente que puede causar mucho daño a medio alcance. </td></tr>
54
- <tr><td>Rifle</td><td>El Segador</td><td>Un rifle de francotirador oculto que puede matar a los enemigos con un disparo desde una larga distancia. </td></tr>
55
- <tr><td>Escopeta</td><td>AA-12</td><td><td>Una escopeta automática que puede disparar múltiples disparos en poco tiempo y causar daños masivos a corta distancia. </td></tr>
56
- <tr><td>Armor</td><td>Exoskeleton</td><td>Un traje de alta tecnología que puede protegerte de balas y explosiones y aumentar tu fuerza y velocidad. </td></tr>
57
- <tr><td>Casco</td><td>Gafas de visión nocturna</td><td>Un casco que te permite ver en la oscuridad y resaltar enemigos y objetos. </td></tr>
58
- <tr><td>Guantes</td><td>Guantes de choque</td><td>Guantes que pueden electrocutar a los enemigos y aturdirlos durante unos segundos. </td></tr>
59
- <tr><td>Boots</td><td>Jet Boots</td><td>Botas que te permiten volar por poco tiempo y evitar obstáculos y enemigos. </td></tr>
60
-
61
- </tabla>
62
- <h3>Cómo completar misiones y explorar el mundo</h3>
63
- <p>En Walking Zombie 2, puedes seguir la historia principal y completar varias misiones secundarias que te darán recompensas, puntos de experiencia e información sobre el mundo. Las misiones están marcadas en el mapa con iconos como signos de exclamación, signos de interrogación o estrellas. También puedes hablar con NPCs para obtener misiones o pistas sobre misiones ocultas. Algunas misiones son fáciles y directas, mientras que otras son complejas y requieren múltiples pasos o opciones. </p>
64
- <p>El mundo de Walking Zombie 2 es vasto y diverso, con diferentes regiones, pueblos, bases, campamentos, mazmorras, etc. Puede explorar el mundo caminando, conduciendo o viajando rápidamente a lugares que ha descubierto. También puedes encontrar secretos, huevos de Pascua, objetos de colección y referencias a otros juegos y películas. Por ejemplo, usted puede encontrar un coche DeLorean de vuelta al futuro en un garaje cerca de la ciudad de New Hope.</p>
65
- <p>Algunas de las mejores misiones y ubicaciones para completar y explorar son:</p>
66
- <tabla>
67
- <tr><th>Quest</th><th>Descripción</th></tr>
68
- <tr><td>La última esperanza</td><td>Una búsqueda principal que implica encontrar una cura para el virus zombie y salvar a la humanidad. </td></tr>
69
- <tr><td>La Arena</td><td>Una misión paralela que implica luchar contra oleadas de zombis y enemigos en una arena estilo gladiador. </td></tr>
70
- <tr><td>El misterio de la pirámide</td><td>Una búsqueda oculta que involucra resolver rompecabezas y encontrar pistas en una pirámide antigua. </td></tr>
71
- <tr><th>Ubicación</th><th>Descripción</th></tr>
72
- <tr><td>Nueva esperanza</td><td>Una gran ciudad que sirve como el centro principal del juego, donde se pueden encontrar tiendas, comerciantes, misiones y aliados. </td></tr>
73
- <tr><td>El búnker</td><td>Una instalación subterránea secreta que contiene tecnología avanzada, armas y secretos. </td></tr>
75
- <tr><td>El cementerio</td><td>Un lugar espeluznante lleno de zombies, fantasmas y lápidas. </td></tr>
76
- </tabla>
77
- <h2>Conclusión: ¿Por qué usted debe descargar APK Mod Walking Zombie 2</h2>
78
-
79
- <h3>Preguntas frecuentes</h3>
80
- <p>Aquí hay algunas preguntas y respuestas frecuentes sobre el mod APK para Walking Zombie 2:</p>
81
- <ol>
82
- <li> ¿Es el mod APK para Walking Zombie 2 seguro de usar? </li>
83
- <p>El mod APK para Walking Zombie 2 es seguro de usar siempre y cuando lo descargue de un sitio web de confianza como [ [APKMB.Com]]. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas y escanearlas en busca de virus o malware antes de instalarlas. También deberías hacer una copia de seguridad de tus datos y desinstalar el juego original antes de instalar el mod, ya que podría sobrescribir o borrar tu progreso. </p>
84
- <li>¿Afectará el mod APK para Walking Zombie 2 al rendimiento o la compatibilidad del juego? </li>
85
- <p>El mod APK para Walking Zombie 2 no debe afectar el rendimiento del juego o la compatibilidad, ya que se basa en la misma versión del juego. Sin embargo, algunas características del mod pueden causar algunos fallos o errores en el juego, como estrellarse, congelarse o retrasarse. Si tienes algún problema con el mod, puedes intentar reinstalarlo, borrar la caché o reiniciar el dispositivo. También puede ponerse en contacto con el desarrollador mod o el sitio web para obtener soporte. </p>
86
- <li>¿Puedo jugar online o multijugador con el mod APK para Walking Zombie 2?</li>
87
- <p>El mod APK para Walking Zombie 2 está diseñado para el modo sin conexión y solo para un jugador. No puedes jugar online o multijugador con el mod, ya que podría causar errores o prohibiciones en los servidores del juego. Solo puedes jugar online o multijugador con el juego original, sin mods. </p>
88
- <li>¿Puedo actualizar el mod APK para Walking Zombie 2?</li>
89
- <p>El mod APK para Walking Zombie 2 se actualiza regularmente por el desarrollador mod o el sitio web para que coincida con la última versión del juego. Puede comprobar si hay actualizaciones en el sitio web o en la propia aplicación. Sin embargo, es posible que necesites desinstalar y reinstalar el mod cada vez que haya una nueva actualización, ya que podría no funcionar con versiones anteriores del juego. </p>
90
- <li> ¿Puedo usar otros mods o trucos con el mod APK para Walking Zombie 2?</li>
91
-
92
- </ol></p> 64aa2da5cf<br />
93
- <br />
94
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/logging.py DELETED
@@ -1,348 +0,0 @@
1
- import contextlib
2
- import errno
3
- import logging
4
- import logging.handlers
5
- import os
6
- import sys
7
- import threading
8
- from dataclasses import dataclass
9
- from io import TextIOWrapper
10
- from logging import Filter
11
- from typing import Any, ClassVar, Generator, List, Optional, TextIO, Type
12
-
13
- from pip._vendor.rich.console import (
14
- Console,
15
- ConsoleOptions,
16
- ConsoleRenderable,
17
- RenderableType,
18
- RenderResult,
19
- RichCast,
20
- )
21
- from pip._vendor.rich.highlighter import NullHighlighter
22
- from pip._vendor.rich.logging import RichHandler
23
- from pip._vendor.rich.segment import Segment
24
- from pip._vendor.rich.style import Style
25
-
26
- from pip._internal.utils._log import VERBOSE, getLogger
27
- from pip._internal.utils.compat import WINDOWS
28
- from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX
29
- from pip._internal.utils.misc import ensure_dir
30
-
31
- _log_state = threading.local()
32
- subprocess_logger = getLogger("pip.subprocessor")
33
-
34
-
35
- class BrokenStdoutLoggingError(Exception):
36
- """
37
- Raised if BrokenPipeError occurs for the stdout stream while logging.
38
- """
39
-
40
-
41
- def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) -> bool:
42
- if exc_class is BrokenPipeError:
43
- return True
44
-
45
- # On Windows, a broken pipe can show up as EINVAL rather than EPIPE:
46
- # https://bugs.python.org/issue19612
47
- # https://bugs.python.org/issue30418
48
- if not WINDOWS:
49
- return False
50
-
51
- return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE)
52
-
53
-
54
- @contextlib.contextmanager
55
- def indent_log(num: int = 2) -> Generator[None, None, None]:
56
- """
57
- A context manager which will cause the log output to be indented for any
58
- log messages emitted inside it.
59
- """
60
- # For thread-safety
61
- _log_state.indentation = get_indentation()
62
- _log_state.indentation += num
63
- try:
64
- yield
65
- finally:
66
- _log_state.indentation -= num
67
-
68
-
69
- def get_indentation() -> int:
70
- return getattr(_log_state, "indentation", 0)
71
-
72
-
73
- class IndentingFormatter(logging.Formatter):
74
- default_time_format = "%Y-%m-%dT%H:%M:%S"
75
-
76
- def __init__(
77
- self,
78
- *args: Any,
79
- add_timestamp: bool = False,
80
- **kwargs: Any,
81
- ) -> None:
82
- """
83
- A logging.Formatter that obeys the indent_log() context manager.
84
-
85
- :param add_timestamp: A bool indicating output lines should be prefixed
86
- with their record's timestamp.
87
- """
88
- self.add_timestamp = add_timestamp
89
- super().__init__(*args, **kwargs)
90
-
91
- def get_message_start(self, formatted: str, levelno: int) -> str:
92
- """
93
- Return the start of the formatted log message (not counting the
94
- prefix to add to each line).
95
- """
96
- if levelno < logging.WARNING:
97
- return ""
98
- if formatted.startswith(DEPRECATION_MSG_PREFIX):
99
- # Then the message already has a prefix. We don't want it to
100
- # look like "WARNING: DEPRECATION: ...."
101
- return ""
102
- if levelno < logging.ERROR:
103
- return "WARNING: "
104
-
105
- return "ERROR: "
106
-
107
- def format(self, record: logging.LogRecord) -> str:
108
- """
109
- Calls the standard formatter, but will indent all of the log message
110
- lines by our current indentation level.
111
- """
112
- formatted = super().format(record)
113
- message_start = self.get_message_start(formatted, record.levelno)
114
- formatted = message_start + formatted
115
-
116
- prefix = ""
117
- if self.add_timestamp:
118
- prefix = f"{self.formatTime(record)} "
119
- prefix += " " * get_indentation()
120
- formatted = "".join([prefix + line for line in formatted.splitlines(True)])
121
- return formatted
122
-
123
-
124
- @dataclass
125
- class IndentedRenderable:
126
- renderable: RenderableType
127
- indent: int
128
-
129
- def __rich_console__(
130
- self, console: Console, options: ConsoleOptions
131
- ) -> RenderResult:
132
- segments = console.render(self.renderable, options)
133
- lines = Segment.split_lines(segments)
134
- for line in lines:
135
- yield Segment(" " * self.indent)
136
- yield from line
137
- yield Segment("\n")
138
-
139
-
140
- class RichPipStreamHandler(RichHandler):
141
- KEYWORDS: ClassVar[Optional[List[str]]] = []
142
-
143
- def __init__(self, stream: Optional[TextIO], no_color: bool) -> None:
144
- super().__init__(
145
- console=Console(file=stream, no_color=no_color, soft_wrap=True),
146
- show_time=False,
147
- show_level=False,
148
- show_path=False,
149
- highlighter=NullHighlighter(),
150
- )
151
-
152
- # Our custom override on Rich's logger, to make things work as we need them to.
153
- def emit(self, record: logging.LogRecord) -> None:
154
- style: Optional[Style] = None
155
-
156
- # If we are given a diagnostic error to present, present it with indentation.
157
- assert isinstance(record.args, tuple)
158
- if record.msg == "[present-rich] %s" and len(record.args) == 1:
159
- rich_renderable = record.args[0]
160
- assert isinstance(
161
- rich_renderable, (ConsoleRenderable, RichCast, str)
162
- ), f"{rich_renderable} is not rich-console-renderable"
163
-
164
- renderable: RenderableType = IndentedRenderable(
165
- rich_renderable, indent=get_indentation()
166
- )
167
- else:
168
- message = self.format(record)
169
- renderable = self.render_message(record, message)
170
- if record.levelno is not None:
171
- if record.levelno >= logging.ERROR:
172
- style = Style(color="red")
173
- elif record.levelno >= logging.WARNING:
174
- style = Style(color="yellow")
175
-
176
- try:
177
- self.console.print(renderable, overflow="ignore", crop=False, style=style)
178
- except Exception:
179
- self.handleError(record)
180
-
181
- def handleError(self, record: logging.LogRecord) -> None:
182
- """Called when logging is unable to log some output."""
183
-
184
- exc_class, exc = sys.exc_info()[:2]
185
- # If a broken pipe occurred while calling write() or flush() on the
186
- # stdout stream in logging's Handler.emit(), then raise our special
187
- # exception so we can handle it in main() instead of logging the
188
- # broken pipe error and continuing.
189
- if (
190
- exc_class
191
- and exc
192
- and self.console.file is sys.stdout
193
- and _is_broken_pipe_error(exc_class, exc)
194
- ):
195
- raise BrokenStdoutLoggingError()
196
-
197
- return super().handleError(record)
198
-
199
-
200
- class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
201
- def _open(self) -> TextIOWrapper:
202
- ensure_dir(os.path.dirname(self.baseFilename))
203
- return super()._open()
204
-
205
-
206
- class MaxLevelFilter(Filter):
207
- def __init__(self, level: int) -> None:
208
- self.level = level
209
-
210
- def filter(self, record: logging.LogRecord) -> bool:
211
- return record.levelno < self.level
212
-
213
-
214
- class ExcludeLoggerFilter(Filter):
215
-
216
- """
217
- A logging Filter that excludes records from a logger (or its children).
218
- """
219
-
220
- def filter(self, record: logging.LogRecord) -> bool:
221
- # The base Filter class allows only records from a logger (or its
222
- # children).
223
- return not super().filter(record)
224
-
225
-
226
- def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int:
227
- """Configures and sets up all of the logging
228
-
229
- Returns the requested logging level, as its integer value.
230
- """
231
-
232
- # Determine the level to be logging at.
233
- if verbosity >= 2:
234
- level_number = logging.DEBUG
235
- elif verbosity == 1:
236
- level_number = VERBOSE
237
- elif verbosity == -1:
238
- level_number = logging.WARNING
239
- elif verbosity == -2:
240
- level_number = logging.ERROR
241
- elif verbosity <= -3:
242
- level_number = logging.CRITICAL
243
- else:
244
- level_number = logging.INFO
245
-
246
- level = logging.getLevelName(level_number)
247
-
248
- # The "root" logger should match the "console" level *unless* we also need
249
- # to log to a user log file.
250
- include_user_log = user_log_file is not None
251
- if include_user_log:
252
- additional_log_file = user_log_file
253
- root_level = "DEBUG"
254
- else:
255
- additional_log_file = "/dev/null"
256
- root_level = level
257
-
258
- # Disable any logging besides WARNING unless we have DEBUG level logging
259
- # enabled for vendored libraries.
260
- vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
261
-
262
- # Shorthands for clarity
263
- log_streams = {
264
- "stdout": "ext://sys.stdout",
265
- "stderr": "ext://sys.stderr",
266
- }
267
- handler_classes = {
268
- "stream": "pip._internal.utils.logging.RichPipStreamHandler",
269
- "file": "pip._internal.utils.logging.BetterRotatingFileHandler",
270
- }
271
- handlers = ["console", "console_errors", "console_subprocess"] + (
272
- ["user_log"] if include_user_log else []
273
- )
274
-
275
- logging.config.dictConfig(
276
- {
277
- "version": 1,
278
- "disable_existing_loggers": False,
279
- "filters": {
280
- "exclude_warnings": {
281
- "()": "pip._internal.utils.logging.MaxLevelFilter",
282
- "level": logging.WARNING,
283
- },
284
- "restrict_to_subprocess": {
285
- "()": "logging.Filter",
286
- "name": subprocess_logger.name,
287
- },
288
- "exclude_subprocess": {
289
- "()": "pip._internal.utils.logging.ExcludeLoggerFilter",
290
- "name": subprocess_logger.name,
291
- },
292
- },
293
- "formatters": {
294
- "indent": {
295
- "()": IndentingFormatter,
296
- "format": "%(message)s",
297
- },
298
- "indent_with_timestamp": {
299
- "()": IndentingFormatter,
300
- "format": "%(message)s",
301
- "add_timestamp": True,
302
- },
303
- },
304
- "handlers": {
305
- "console": {
306
- "level": level,
307
- "class": handler_classes["stream"],
308
- "no_color": no_color,
309
- "stream": log_streams["stdout"],
310
- "filters": ["exclude_subprocess", "exclude_warnings"],
311
- "formatter": "indent",
312
- },
313
- "console_errors": {
314
- "level": "WARNING",
315
- "class": handler_classes["stream"],
316
- "no_color": no_color,
317
- "stream": log_streams["stderr"],
318
- "filters": ["exclude_subprocess"],
319
- "formatter": "indent",
320
- },
321
- # A handler responsible for logging to the console messages
322
- # from the "subprocessor" logger.
323
- "console_subprocess": {
324
- "level": level,
325
- "class": handler_classes["stream"],
326
- "stream": log_streams["stderr"],
327
- "no_color": no_color,
328
- "filters": ["restrict_to_subprocess"],
329
- "formatter": "indent",
330
- },
331
- "user_log": {
332
- "level": "DEBUG",
333
- "class": handler_classes["file"],
334
- "filename": additional_log_file,
335
- "encoding": "utf-8",
336
- "delay": True,
337
- "formatter": "indent_with_timestamp",
338
- },
339
- },
340
- "root": {
341
- "level": root_level,
342
- "handlers": handlers,
343
- },
344
- "loggers": {"pip._vendor": {"level": vendored_log_level}},
345
- }
346
- )
347
-
348
- return level_number
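
Most of setup_logging above is dictConfig wiring; the part worth checking in isolation is the mapping from pip's -v/-q counters to logging levels at the top of the function. The sketch below is not from the file: it reimplements just that branch so it runs standalone without pip's private modules, and the numeric value used for pip's custom VERBOSE level (15, between DEBUG and INFO) is an assumption.

# Hedged sketch: the verbosity -> level mapping used by setup_logging above,
# rewritten as a standalone function (no pip._internal imports).
import logging

VERBOSE = 15  # assumed value of pip's custom level; it sits between DEBUG (10) and INFO (20)

def level_for(verbosity: int) -> int:
    if verbosity >= 2:
        return logging.DEBUG      # -vv and beyond
    if verbosity == 1:
        return VERBOSE            # -v
    if verbosity == -1:
        return logging.WARNING    # -q
    if verbosity == -2:
        return logging.ERROR      # -qq
    if verbosity <= -3:
        return logging.CRITICAL   # -qqq and beyond
    return logging.INFO           # default

assert level_for(0) == logging.INFO
assert level_for(2) == logging.DEBUG
assert level_for(-1) == logging.WARNING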
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/bdist.py DELETED
@@ -1,157 +0,0 @@
1
- """distutils.command.bdist
2
-
3
- Implements the Distutils 'bdist' command (create a built [binary]
4
- distribution)."""
5
-
6
- import os
7
- import warnings
8
-
9
- from distutils.core import Command
10
- from distutils.errors import DistutilsPlatformError, DistutilsOptionError
11
- from distutils.util import get_platform
12
-
13
-
14
- def show_formats():
15
- """Print list of available formats (arguments to "--format" option)."""
16
- from distutils.fancy_getopt import FancyGetopt
17
-
18
- formats = []
19
- for format in bdist.format_commands:
20
- formats.append(("formats=" + format, None, bdist.format_commands[format][1]))
21
- pretty_printer = FancyGetopt(formats)
22
- pretty_printer.print_help("List of available distribution formats:")
23
-
24
-
25
- class ListCompat(dict):
26
- # adapter to allow for Setuptools compatibility in format_commands
27
- def append(self, item):
28
- warnings.warn(
29
- """format_commands is now a dict. append is deprecated.""",
30
- DeprecationWarning,
31
- stacklevel=2,
32
- )
33
-
34
-
35
- class bdist(Command):
36
-
37
- description = "create a built (binary) distribution"
38
-
39
- user_options = [
40
- ('bdist-base=', 'b', "temporary directory for creating built distributions"),
41
- (
42
- 'plat-name=',
43
- 'p',
44
- "platform name to embed in generated filenames "
45
- "(default: %s)" % get_platform(),
46
- ),
47
- ('formats=', None, "formats for distribution (comma-separated list)"),
48
- (
49
- 'dist-dir=',
50
- 'd',
51
- "directory to put final built distributions in " "[default: dist]",
52
- ),
53
- ('skip-build', None, "skip rebuilding everything (for testing/debugging)"),
54
- (
55
- 'owner=',
56
- 'u',
57
- "Owner name used when creating a tar file" " [default: current user]",
58
- ),
59
- (
60
- 'group=',
61
- 'g',
62
- "Group name used when creating a tar file" " [default: current group]",
63
- ),
64
- ]
65
-
66
- boolean_options = ['skip-build']
67
-
68
- help_options = [
69
- ('help-formats', None, "lists available distribution formats", show_formats),
70
- ]
71
-
72
- # The following commands do not take a format option from bdist
73
- no_format_option = ('bdist_rpm',)
74
-
75
- # This won't do in reality: will need to distinguish RPM-ish Linux,
76
- # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
77
- default_format = {'posix': 'gztar', 'nt': 'zip'}
78
-
79
- # Define commands in preferred order for the --help-formats option
80
- format_commands = ListCompat(
81
- {
82
- 'rpm': ('bdist_rpm', "RPM distribution"),
83
- 'gztar': ('bdist_dumb', "gzip'ed tar file"),
84
- 'bztar': ('bdist_dumb', "bzip2'ed tar file"),
85
- 'xztar': ('bdist_dumb', "xz'ed tar file"),
86
- 'ztar': ('bdist_dumb', "compressed tar file"),
87
- 'tar': ('bdist_dumb', "tar file"),
88
- 'zip': ('bdist_dumb', "ZIP file"),
89
- }
90
- )
91
-
92
- # for compatibility until consumers only reference format_commands
93
- format_command = format_commands
94
-
95
- def initialize_options(self):
96
- self.bdist_base = None
97
- self.plat_name = None
98
- self.formats = None
99
- self.dist_dir = None
100
- self.skip_build = 0
101
- self.group = None
102
- self.owner = None
103
-
104
- def finalize_options(self):
105
- # have to finalize 'plat_name' before 'bdist_base'
106
- if self.plat_name is None:
107
- if self.skip_build:
108
- self.plat_name = get_platform()
109
- else:
110
- self.plat_name = self.get_finalized_command('build').plat_name
111
-
112
- # 'bdist_base' -- parent of per-built-distribution-format
113
- # temporary directories (eg. we'll probably have
114
- # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
115
- if self.bdist_base is None:
116
- build_base = self.get_finalized_command('build').build_base
117
- self.bdist_base = os.path.join(build_base, 'bdist.' + self.plat_name)
118
-
119
- self.ensure_string_list('formats')
120
- if self.formats is None:
121
- try:
122
- self.formats = [self.default_format[os.name]]
123
- except KeyError:
124
- raise DistutilsPlatformError(
125
- "don't know how to create built distributions "
126
- "on platform %s" % os.name
127
- )
128
-
129
- if self.dist_dir is None:
130
- self.dist_dir = "dist"
131
-
132
- def run(self):
133
- # Figure out which sub-commands we need to run.
134
- commands = []
135
- for format in self.formats:
136
- try:
137
- commands.append(self.format_commands[format][0])
138
- except KeyError:
139
- raise DistutilsOptionError("invalid format '%s'" % format)
140
-
141
- # Reinitialize and run each command.
142
- for i in range(len(self.formats)):
143
- cmd_name = commands[i]
144
- sub_cmd = self.reinitialize_command(cmd_name)
145
- if cmd_name not in self.no_format_option:
146
- sub_cmd.format = self.formats[i]
147
-
148
- # passing the owner and group names for tar archiving
149
- if cmd_name == 'bdist_dumb':
150
- sub_cmd.owner = self.owner
151
- sub_cmd.group = self.group
152
-
153
- # If we're going to need to run this command again, tell it to
154
- # keep its temporary files around so subsequent runs go faster.
155
- if cmd_name in commands[i + 1 :]:
156
- sub_cmd.keep_temp = 1
157
- self.run_command(cmd_name)
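
In run() above, each entry of --formats is resolved through format_commands before the matching sub-command is reinitialized and executed. The following standalone sketch is not part of the file: it copies an abbreviated subset of that mapping rather than importing distutils, purely to show the lookup.

# Hedged sketch: how `bdist --formats=gztar,zip` resolves to sub-commands,
# mirroring the format_commands lookup in run() above (no distutils import).
format_commands = {
    "rpm": ("bdist_rpm", "RPM distribution"),
    "gztar": ("bdist_dumb", "gzip'ed tar file"),
    "zip": ("bdist_dumb", "ZIP file"),
}  # abbreviated copy of the class attribute shown above

def resolve(formats):
    commands = []
    for fmt in formats.split(","):
        try:
            commands.append(format_commands[fmt][0])
        except KeyError:
            raise ValueError("invalid format %r" % fmt)
    return commands

print(resolve("gztar,zip"))  # ['bdist_dumb', 'bdist_dumb']; each run gets its own format string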
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/_validate_pyproject/formats.py DELETED
@@ -1,259 +0,0 @@
1
- import logging
2
- import os
3
- import re
4
- import string
5
- import typing
6
- from itertools import chain as _chain
7
-
8
- _logger = logging.getLogger(__name__)
9
-
10
- # -------------------------------------------------------------------------------------
11
- # PEP 440
12
-
13
- VERSION_PATTERN = r"""
14
- v?
15
- (?:
16
- (?:(?P<epoch>[0-9]+)!)? # epoch
17
- (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
18
- (?P<pre> # pre-release
19
- [-_\.]?
20
- (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
21
- [-_\.]?
22
- (?P<pre_n>[0-9]+)?
23
- )?
24
- (?P<post> # post release
25
- (?:-(?P<post_n1>[0-9]+))
26
- |
27
- (?:
28
- [-_\.]?
29
- (?P<post_l>post|rev|r)
30
- [-_\.]?
31
- (?P<post_n2>[0-9]+)?
32
- )
33
- )?
34
- (?P<dev> # dev release
35
- [-_\.]?
36
- (?P<dev_l>dev)
37
- [-_\.]?
38
- (?P<dev_n>[0-9]+)?
39
- )?
40
- )
41
- (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
42
- """
43
-
44
- VERSION_REGEX = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.X | re.I)
45
-
46
-
47
- def pep440(version: str) -> bool:
48
- return VERSION_REGEX.match(version) is not None
49
-
50
-
51
- # -------------------------------------------------------------------------------------
52
- # PEP 508
53
-
54
- PEP508_IDENTIFIER_PATTERN = r"([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])"
55
- PEP508_IDENTIFIER_REGEX = re.compile(f"^{PEP508_IDENTIFIER_PATTERN}$", re.I)
56
-
57
-
58
- def pep508_identifier(name: str) -> bool:
59
- return PEP508_IDENTIFIER_REGEX.match(name) is not None
60
-
61
-
62
- try:
63
- try:
64
- from packaging import requirements as _req
65
- except ImportError: # pragma: no cover
66
- # let's try setuptools vendored version
67
- from setuptools._vendor.packaging import requirements as _req # type: ignore
68
-
69
- def pep508(value: str) -> bool:
70
- try:
71
- _req.Requirement(value)
72
- return True
73
- except _req.InvalidRequirement:
74
- return False
75
-
76
- except ImportError: # pragma: no cover
77
- _logger.warning(
78
- "Could not find an installation of `packaging`. Requirements, dependencies and "
79
- "versions might not be validated. "
80
- "To enforce validation, please install `packaging`."
81
- )
82
-
83
- def pep508(value: str) -> bool:
84
- return True
85
-
86
-
87
- def pep508_versionspec(value: str) -> bool:
88
- """Expression that can be used to specify/lock versions (including ranges)"""
89
- if any(c in value for c in (";", "]", "@")):
90
- # In PEP 508:
91
- # conditional markers, extras and URL specs are not included in the
92
- # versionspec
93
- return False
94
- # Let's pretend we have a dependency called `requirement` with the given
95
- # version spec, then we can re-use the pep508 function for validation:
96
- return pep508(f"requirement{value}")
97
-
98
-
99
- # -------------------------------------------------------------------------------------
100
- # PEP 517
101
-
102
-
103
- def pep517_backend_reference(value: str) -> bool:
104
- module, _, obj = value.partition(":")
105
- identifiers = (i.strip() for i in _chain(module.split("."), obj.split(".")))
106
- return all(python_identifier(i) for i in identifiers if i)
107
-
108
-
109
- # -------------------------------------------------------------------------------------
110
- # Classifiers - PEP 301
111
-
112
-
113
- def _download_classifiers() -> str:
114
- import ssl
115
- from email.message import Message
116
- from urllib.request import urlopen
117
-
118
- url = "https://pypi.org/pypi?:action=list_classifiers"
119
- context = ssl.create_default_context()
120
- with urlopen(url, context=context) as response:
121
- headers = Message()
122
- headers["content_type"] = response.getheader("content-type", "text/plain")
123
- return response.read().decode(headers.get_param("charset", "utf-8"))
124
-
125
-
126
- class _TroveClassifier:
127
- """The ``trove_classifiers`` package is the official way of validating classifiers,
128
- however this package might not be always available.
129
- As a workaround we can still download a list from PyPI.
130
- We also don't want to be over strict about it, so simply skipping silently is an
131
- option (classifiers will be validated anyway during the upload to PyPI).
132
- """
133
-
134
- def __init__(self):
135
- self.downloaded: typing.Union[None, False, typing.Set[str]] = None
136
- self._skip_download = False
137
- # None => not cached yet
138
- # False => cache not available
139
- self.__name__ = "trove_classifier" # Emulate a public function
140
-
141
- def _disable_download(self):
142
- # This is a private API. Only setuptools has the consent of using it.
143
- self._skip_download = True
144
-
145
- def __call__(self, value: str) -> bool:
146
- if self.downloaded is False or self._skip_download is True:
147
- return True
148
-
149
- if os.getenv("NO_NETWORK") or os.getenv("VALIDATE_PYPROJECT_NO_NETWORK"):
150
- self.downloaded = False
151
- msg = (
152
- "Install ``trove-classifiers`` to ensure proper validation. "
153
- "Skipping download of classifiers list from PyPI (NO_NETWORK)."
154
- )
155
- _logger.debug(msg)
156
- return True
157
-
158
- if self.downloaded is None:
159
- msg = (
160
- "Install ``trove-classifiers`` to ensure proper validation. "
161
- "Meanwhile a list of classifiers will be downloaded from PyPI."
162
- )
163
- _logger.debug(msg)
164
- try:
165
- self.downloaded = set(_download_classifiers().splitlines())
166
- except Exception:
167
- self.downloaded = False
168
- _logger.debug("Problem with download, skipping validation")
169
- return True
170
-
171
- return value in self.downloaded or value.lower().startswith("private ::")
172
-
173
-
174
- try:
175
- from trove_classifiers import classifiers as _trove_classifiers
176
-
177
- def trove_classifier(value: str) -> bool:
178
- return value in _trove_classifiers or value.lower().startswith("private ::")
179
-
180
- except ImportError: # pragma: no cover
181
- trove_classifier = _TroveClassifier()
182
-
183
-
184
- # -------------------------------------------------------------------------------------
185
- # Non-PEP related
186
-
187
-
188
- def url(value: str) -> bool:
189
- from urllib.parse import urlparse
190
-
191
- try:
192
- parts = urlparse(value)
193
- if not parts.scheme:
194
- _logger.warning(
195
- "For maximum compatibility please make sure to include a "
196
- "`scheme` prefix in your URL (e.g. 'http://'). "
197
- f"Given value: {value}"
198
- )
199
- if not (value.startswith("/") or value.startswith("\\") or "@" in value):
200
- parts = urlparse(f"http://{value}")
201
-
202
- return bool(parts.scheme and parts.netloc)
203
- except Exception:
204
- return False
205
-
206
-
207
- # https://packaging.python.org/specifications/entry-points/
208
- ENTRYPOINT_PATTERN = r"[^\[\s=]([^=]*[^\s=])?"
209
- ENTRYPOINT_REGEX = re.compile(f"^{ENTRYPOINT_PATTERN}$", re.I)
210
- RECOMMEDED_ENTRYPOINT_PATTERN = r"[\w.-]+"
211
- RECOMMEDED_ENTRYPOINT_REGEX = re.compile(f"^{RECOMMEDED_ENTRYPOINT_PATTERN}$", re.I)
212
- ENTRYPOINT_GROUP_PATTERN = r"\w+(\.\w+)*"
213
- ENTRYPOINT_GROUP_REGEX = re.compile(f"^{ENTRYPOINT_GROUP_PATTERN}$", re.I)
214
-
215
-
216
- def python_identifier(value: str) -> bool:
217
- return value.isidentifier()
218
-
219
-
220
- def python_qualified_identifier(value: str) -> bool:
221
- if value.startswith(".") or value.endswith("."):
222
- return False
223
- return all(python_identifier(m) for m in value.split("."))
224
-
225
-
226
- def python_module_name(value: str) -> bool:
227
- return python_qualified_identifier(value)
228
-
229
-
230
- def python_entrypoint_group(value: str) -> bool:
231
- return ENTRYPOINT_GROUP_REGEX.match(value) is not None
232
-
233
-
234
- def python_entrypoint_name(value: str) -> bool:
235
- if not ENTRYPOINT_REGEX.match(value):
236
- return False
237
- if not RECOMMEDED_ENTRYPOINT_REGEX.match(value):
238
- msg = f"Entry point `{value}` does not follow recommended pattern: "
239
- msg += RECOMMEDED_ENTRYPOINT_PATTERN
240
- _logger.warning(msg)
241
- return True
242
-
243
-
244
- def python_entrypoint_reference(value: str) -> bool:
245
- module, _, rest = value.partition(":")
246
- if "[" in rest:
247
- obj, _, extras_ = rest.partition("[")
248
- if extras_.strip()[-1] != "]":
249
- return False
250
- extras = (x.strip() for x in extras_.strip(string.whitespace + "[]").split(","))
251
- if not all(pep508_identifier(e) for e in extras):
252
- return False
253
- _logger.warning(f"`{value}` - using extras for entry points is not recommended")
254
- else:
255
- obj = rest
256
-
257
- module_parts = module.split(".")
258
- identifiers = _chain(module_parts, obj.split(".")) if rest else module_parts
259
- return all(python_identifier(i.strip()) for i in identifiers)
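A minimal usage sketch of the validators removed above (assumes the vendored module is importable from the path shown; the expected results follow the regexes defined in this file):

    from setuptools.config._validate_pyproject import formats

    print(formats.pep440("1.0.0rc1"))                           # True
    print(formats.pep440("not-a-version"))                      # False
    print(formats.pep508_identifier("requests"))                # True
    print(formats.python_entrypoint_reference("pkg.mod:func"))  # True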
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/colormap.py DELETED
@@ -1,140 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
-
3
- """
4
- An awesome colormap for really neat visualizations.
5
- Copied from Detectron, and removed gray colors.
6
- """
7
-
8
- import numpy as np
9
-
10
- __all__ = ["colormap", "random_color"]
11
-
12
- # fmt: off
13
- # RGB:
14
- _COLORS = np.array(
15
- [
16
- 0.000, 0.447, 0.741,
17
- 0.850, 0.325, 0.098,
18
- 0.929, 0.694, 0.125,
19
- 0.494, 0.184, 0.556,
20
- 0.466, 0.674, 0.188,
21
- 0.301, 0.745, 0.933,
22
- 0.635, 0.078, 0.184,
23
- 0.300, 0.300, 0.300,
24
- 0.600, 0.600, 0.600,
25
- 1.000, 0.000, 0.000,
26
- 1.000, 0.500, 0.000,
27
- 0.749, 0.749, 0.000,
28
- 0.000, 1.000, 0.000,
29
- 0.000, 0.000, 1.000,
30
- 0.667, 0.000, 1.000,
31
- 0.333, 0.333, 0.000,
32
- 0.333, 0.667, 0.000,
33
- 0.333, 1.000, 0.000,
34
- 0.667, 0.333, 0.000,
35
- 0.667, 0.667, 0.000,
36
- 0.667, 1.000, 0.000,
37
- 1.000, 0.333, 0.000,
38
- 1.000, 0.667, 0.000,
39
- 1.000, 1.000, 0.000,
40
- 0.000, 0.333, 0.500,
41
- 0.000, 0.667, 0.500,
42
- 0.000, 1.000, 0.500,
43
- 0.333, 0.000, 0.500,
44
- 0.333, 0.333, 0.500,
45
- 0.333, 0.667, 0.500,
46
- 0.333, 1.000, 0.500,
47
- 0.667, 0.000, 0.500,
48
- 0.667, 0.333, 0.500,
49
- 0.667, 0.667, 0.500,
50
- 0.667, 1.000, 0.500,
51
- 1.000, 0.000, 0.500,
52
- 1.000, 0.333, 0.500,
53
- 1.000, 0.667, 0.500,
54
- 1.000, 1.000, 0.500,
55
- 0.000, 0.333, 1.000,
56
- 0.000, 0.667, 1.000,
57
- 0.000, 1.000, 1.000,
58
- 0.333, 0.000, 1.000,
59
- 0.333, 0.333, 1.000,
60
- 0.333, 0.667, 1.000,
61
- 0.333, 1.000, 1.000,
62
- 0.667, 0.000, 1.000,
63
- 0.667, 0.333, 1.000,
64
- 0.667, 0.667, 1.000,
65
- 0.667, 1.000, 1.000,
66
- 1.000, 0.000, 1.000,
67
- 1.000, 0.333, 1.000,
68
- 1.000, 0.667, 1.000,
69
- 0.333, 0.000, 0.000,
70
- 0.500, 0.000, 0.000,
71
- 0.667, 0.000, 0.000,
72
- 0.833, 0.000, 0.000,
73
- 1.000, 0.000, 0.000,
74
- 0.000, 0.167, 0.000,
75
- 0.000, 0.333, 0.000,
76
- 0.000, 0.500, 0.000,
77
- 0.000, 0.667, 0.000,
78
- 0.000, 0.833, 0.000,
79
- 0.000, 1.000, 0.000,
80
- 0.000, 0.000, 0.167,
81
- 0.000, 0.000, 0.333,
82
- 0.000, 0.000, 0.500,
83
- 0.000, 0.000, 0.667,
84
- 0.000, 0.000, 0.833,
85
- 0.000, 0.000, 1.000,
86
- 0.000, 0.000, 0.000,
87
- 0.143, 0.143, 0.143,
88
- 0.857, 0.857, 0.857,
89
- 1.000, 1.000, 1.000
90
- ]
91
- ).astype(np.float32).reshape(-1, 3)
92
- # fmt: on
93
-
94
-
95
- def colormap(rgb=False, maximum=255):
96
- """
97
- Args:
98
- rgb (bool): whether to return RGB colors or BGR colors.
99
- maximum (int): either 255 or 1
100
-
101
- Returns:
102
- ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
103
- """
104
- assert maximum in [255, 1], maximum
105
- c = _COLORS * maximum
106
- if not rgb:
107
- c = c[:, ::-1]
108
- return c
109
-
110
-
111
- def random_color(rgb=False, maximum=255):
112
- """
113
- Args:
114
- rgb (bool): whether to return RGB colors or BGR colors.
115
- maximum (int): either 255 or 1
116
-
117
- Returns:
118
- ndarray: a vector of 3 numbers
119
- """
120
- idx = np.random.randint(0, len(_COLORS))
121
- ret = _COLORS[idx] * maximum
122
- if not rgb:
123
- ret = ret[::-1]
124
- return ret
125
-
126
-
127
- if __name__ == "__main__":
128
- import cv2
129
-
130
- size = 100
131
- H, W = 10, 10
132
- canvas = np.random.rand(H * size, W * size, 3).astype("float32")
133
- for h in range(H):
134
- for w in range(W):
135
- idx = h * W + w
136
- if idx >= len(_COLORS):
137
- break
138
- canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
139
- cv2.imshow("a", canvas)
140
- cv2.waitKey(0)
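A minimal usage sketch for the two helpers above (import path as in the deleted file; requires numpy):

    from detectron2.utils.colormap import colormap, random_color

    colors = colormap(rgb=True, maximum=1)   # (N, 3) float32 array with values in [0, 1]
    print(colors.shape, colors.dtype)
    print(random_color(rgb=True))            # a single color triplet scaled to [0, 255]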
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/caffe2_converter.py DELETED
@@ -1,64 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3
- import argparse
4
- import os
5
-
6
- from detectron2.checkpoint import DetectionCheckpointer
7
- from detectron2.config import get_cfg
8
- from detectron2.data import build_detection_test_loader
9
- from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
10
- from detectron2.export import add_export_config, export_caffe2_model
11
- from detectron2.modeling import build_model
12
- from detectron2.utils.logger import setup_logger
13
-
14
-
15
- def setup_cfg(args):
16
- cfg = get_cfg()
17
- # cuda context is initialized before creating dataloader, so we don't fork anymore
18
- cfg.DATALOADER.NUM_WORKERS = 0
19
- cfg = add_export_config(cfg)
20
- cfg.merge_from_file(args.config_file)
21
- cfg.merge_from_list(args.opts)
22
- cfg.freeze()
23
- return cfg
24
-
25
-
26
- if __name__ == "__main__":
27
- parser = argparse.ArgumentParser(description="Convert a model to Caffe2")
28
- parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
29
- parser.add_argument("--run-eval", action="store_true")
30
- parser.add_argument("--output", help="output directory for the converted caffe2 model")
31
- parser.add_argument(
32
- "opts",
33
- help="Modify config options using the command-line",
34
- default=None,
35
- nargs=argparse.REMAINDER,
36
- )
37
- args = parser.parse_args()
38
- logger = setup_logger()
39
- logger.info("Command line arguments: " + str(args))
40
-
41
- cfg = setup_cfg(args)
42
-
43
- # create a torch model
44
- torch_model = build_model(cfg)
45
- DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
46
-
47
- # get a sample data
48
- data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
49
- first_batch = next(iter(data_loader))
50
-
51
- # convert and save caffe2 model
52
- caffe2_model = export_caffe2_model(cfg, torch_model, first_batch)
53
- caffe2_model.save_protobuf(args.output)
54
- # draw the caffe2 graph
55
- caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch)
56
-
57
- # run evaluation with the converted model
58
- if args.run_eval:
59
- dataset = cfg.DATASETS.TEST[0]
60
- data_loader = build_detection_test_loader(cfg, dataset)
61
- # NOTE: hard-coded evaluator. change to the evaluator for your dataset
62
- evaluator = COCOEvaluator(dataset, cfg, True, args.output)
63
- metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
64
- print_csv_format(metrics)
spaces/CVPR/LIVE/thrust/dependencies/cub/examples/device/Makefile DELETED
@@ -1,197 +0,0 @@
1
- #/******************************************************************************
2
- # * Copyright (c) 2011, Duane Merrill. All rights reserved.
3
- # * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
4
- # *
5
- # * Redistribution and use in source and binary forms, with or without
6
- # * modification, are permitted provided that the following conditions are met:
7
- # * * Redistributions of source code must retain the above copyright
8
- # * notice, this list of conditions and the following disclaimer.
9
- # * * Redistributions in binary form must reproduce the above copyright
10
- # * notice, this list of conditions and the following disclaimer in the
11
- # * documentation and/or other materials provided with the distribution.
12
- # * * Neither the name of the NVIDIA CORPORATION nor the
13
- # * names of its contributors may be used to endorse or promote products
14
- # * derived from this software without specific prior written permission.
15
- # *
16
- # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17
- # * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18
- # * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19
- # * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20
- # * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21
- # * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22
- # * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23
- # * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
- # * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25
- # * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
- # *
27
- #******************************************************************************/
28
-
29
- #-------------------------------------------------------------------------------
30
- #
31
- # Makefile usage
32
- #
33
- # make <target> [sm=<XXX,...>] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>]
34
- #
35
- #-------------------------------------------------------------------------------
36
-
37
- include ../../common.mk
38
-
39
-
40
- #-------------------------------------------------------------------------------
41
- # Includes
42
- #-------------------------------------------------------------------------------
43
-
44
- INC += -I$(CUB_DIR) -I$(CUB_DIR)test
45
-
46
-
47
-
48
- #-------------------------------------------------------------------------------
49
- # Dependency Lists
50
- #-------------------------------------------------------------------------------
51
-
52
- rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
53
-
54
- DEPS = $(CUB_DEPS) \
55
- $(CUB_DIR)test/Makefile \
56
- $(CUB_DIR)test/test_util.h \
57
- $(CUB_DIR)test/mersenne.h \
58
-
59
- ALL = example_device_partition_flagged \
60
- example_device_partition_if \
61
- example_device_radix_sort \
62
- example_device_reduce \
63
- example_device_scan \
64
- example_device_select_unique \
65
- example_device_select_flagged \
66
- example_device_select_if \
67
- example_device_sort_find_non_trivial_runs
68
-
69
-
70
-
71
- #-------------------------------------------------------------------------------
72
- # make default
73
- #-------------------------------------------------------------------------------
74
-
75
- default:
76
-
77
-
78
- #-------------------------------------------------------------------------------
79
- # make clean
80
- #-------------------------------------------------------------------------------
81
-
82
- clean :
83
- rm -f bin/*$(CPU_ARCH_SUFFIX)*
84
- rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o
85
-
86
-
87
- #-------------------------------------------------------------------------------
88
- # make all
89
- #-------------------------------------------------------------------------------
90
-
91
- all : $(ALL)
92
-
93
- #-------------------------------------------------------------------------------
94
- # make run
95
- #-------------------------------------------------------------------------------
96
-
97
- run :
98
- for i in $(ALL); do ./bin/$${i}_$(BIN_SUFFIX) --device=$(device) || exit 1; done
99
-
100
-
101
- #-------------------------------------------------------------------------------
102
- # make example_device_reduce
103
- #-------------------------------------------------------------------------------
104
-
105
- example_device_reduce: bin/example_device_reduce_$(BIN_SUFFIX)
106
-
107
- bin/example_device_reduce_$(BIN_SUFFIX) : example_device_reduce.cu $(DEPS)
108
- mkdir -p bin
109
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_reduce_$(BIN_SUFFIX) example_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
110
-
111
-
112
- #-------------------------------------------------------------------------------
113
- # make example_device_partition_flagged
114
- #-------------------------------------------------------------------------------
115
-
116
- example_device_partition_flagged: bin/example_device_partition_flagged_$(BIN_SUFFIX)
117
-
118
- bin/example_device_partition_flagged_$(BIN_SUFFIX) : example_device_partition_flagged.cu $(DEPS)
119
- mkdir -p bin
120
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_partition_flagged_$(BIN_SUFFIX) example_device_partition_flagged.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
121
-
122
- #-------------------------------------------------------------------------------
123
- # make example_device_partition_if
124
- #-------------------------------------------------------------------------------
125
-
126
- example_device_partition_if: bin/example_device_partition_if_$(BIN_SUFFIX)
127
-
128
- bin/example_device_partition_if_$(BIN_SUFFIX) : example_device_partition_if.cu $(DEPS)
129
- mkdir -p bin
130
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_partition_if_$(BIN_SUFFIX) example_device_partition_if.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
131
-
132
- #-------------------------------------------------------------------------------
133
- # make example_device_scan
134
- #-------------------------------------------------------------------------------
135
-
136
- example_device_scan: bin/example_device_scan_$(BIN_SUFFIX)
137
-
138
- bin/example_device_scan_$(BIN_SUFFIX) : example_device_scan.cu $(DEPS)
139
- mkdir -p bin
140
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_scan_$(BIN_SUFFIX) example_device_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
141
-
142
-
143
- #-------------------------------------------------------------------------------
144
- # make example_device_radix_sort
145
- #-------------------------------------------------------------------------------
146
-
147
- example_device_radix_sort: bin/example_device_radix_sort_$(BIN_SUFFIX)
148
-
149
- bin/example_device_radix_sort_$(BIN_SUFFIX) : example_device_radix_sort.cu $(DEPS)
150
- mkdir -p bin
151
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_radix_sort_$(BIN_SUFFIX) example_device_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
152
-
153
-
154
- #-------------------------------------------------------------------------------
155
- # make example_device_select_unique
156
- #-------------------------------------------------------------------------------
157
-
158
- example_device_select_unique: bin/example_device_select_unique_$(BIN_SUFFIX)
159
-
160
- bin/example_device_select_unique_$(BIN_SUFFIX) : example_device_select_unique.cu $(DEPS)
161
- mkdir -p bin
162
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_unique_$(BIN_SUFFIX) example_device_select_unique.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
163
-
164
-
165
- #-------------------------------------------------------------------------------
166
- # make example_device_select_flagged
167
- #-------------------------------------------------------------------------------
168
-
169
- example_device_select_flagged: bin/example_device_select_flagged_$(BIN_SUFFIX)
170
-
171
- bin/example_device_select_flagged_$(BIN_SUFFIX) : example_device_select_flagged.cu $(DEPS)
172
- mkdir -p bin
173
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_flagged_$(BIN_SUFFIX) example_device_select_flagged.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
174
-
175
- #-------------------------------------------------------------------------------
176
- # make example_device_select_if
177
- #-------------------------------------------------------------------------------
178
-
179
- example_device_select_if: bin/example_device_select_if_$(BIN_SUFFIX)
180
-
181
- bin/example_device_select_if_$(BIN_SUFFIX) : example_device_select_if.cu $(DEPS)
182
- mkdir -p bin
183
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_if_$(BIN_SUFFIX) example_device_select_if.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
184
-
185
-
186
- #-------------------------------------------------------------------------------
187
- # make example_device_sort_find_non_trivial_runs
188
- #-------------------------------------------------------------------------------
189
-
190
- example_device_sort_find_non_trivial_runs: bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX)
191
-
192
- bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX) : example_device_sort_find_non_trivial_runs.cu $(DEPS)
193
- mkdir -p bin
194
- $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX) example_device_sort_find_non_trivial_runs.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
195
-
196
-
197
-
spaces/CVPR/LIVE/utils.py DELETED
@@ -1,56 +0,0 @@
1
- import os
2
- import os.path as osp
3
-
4
- def get_experiment_id(debug=False):
5
- if debug:
6
- return 999999999999
7
- import time
8
- time.sleep(0.5)
9
- return int(time.time()*100)
10
-
11
- def get_path_schedule(type, **kwargs):
12
- if type == 'repeat':
13
- max_path = kwargs['max_path']
14
- schedule_each = kwargs['schedule_each']
15
- return [schedule_each] * max_path
16
- elif type == 'list':
17
- schedule = kwargs['schedule']
18
- return schedule
19
- elif type == 'exp':
20
- import math
21
- base = kwargs['base']
22
- max_path = kwargs['max_path']
23
- max_path_per_iter = kwargs['max_path_per_iter']
24
- schedule = []
25
- cnt = 0
26
- while sum(schedule) < max_path:
27
- proposed_step = min(
28
- max_path - sum(schedule),
29
- base**cnt,
30
- max_path_per_iter)
31
- cnt += 1
32
- schedule += [proposed_step]
33
- return schedule
34
- else:
35
- raise ValueError
36
-
37
- def edict_2_dict(x):
38
- if isinstance(x, dict):
39
- xnew = {}
40
- for k in x:
41
- xnew[k] = edict_2_dict(x[k])
42
- return xnew
43
- elif isinstance(x, list):
44
- xnew = []
45
- for i in range(len(x)):
46
- xnew.append( edict_2_dict(x[i]) )
47
- return xnew
48
- else:
49
- return x
50
-
51
- def check_and_create_dir(path):
52
- pathdir = osp.split(path)[0]
53
- if osp.isdir(pathdir):
54
- pass
55
- else:
56
- os.makedirs(pathdir)
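A minimal sketch of the schedule helper above (pure Python; the printed values follow the code in this file):

    from utils import get_path_schedule

    print(get_path_schedule('repeat', max_path=3, schedule_each=2))
    # [2, 2, 2]
    print(get_path_schedule('exp', base=2, max_path=32, max_path_per_iter=8))
    # [1, 2, 4, 8, 8, 8, 1] -- per-iteration steps capped at 8, summing to max_path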
spaces/CVPR/WALT/mmdet/models/dense_heads/atss_head.py DELETED
@@ -1,689 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
4
- from mmcv.runner import force_fp32
5
-
6
- from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,
7
- images_to_levels, multi_apply, multiclass_nms,
8
- reduce_mean, unmap)
9
- from ..builder import HEADS, build_loss
10
- from .anchor_head import AnchorHead
11
-
12
- EPS = 1e-12
13
-
14
-
15
- @HEADS.register_module()
16
- class ATSSHead(AnchorHead):
17
- """Bridging the Gap Between Anchor-based and Anchor-free Detection via
18
- Adaptive Training Sample Selection.
19
-
20
- ATSS head structure is similar with FCOS, however ATSS use anchor boxes
21
- and assign label by Adaptive Training Sample Selection instead max-iou.
22
-
23
- https://arxiv.org/abs/1912.02424
24
- """
25
-
26
- def __init__(self,
27
- num_classes,
28
- in_channels,
29
- stacked_convs=4,
30
- conv_cfg=None,
31
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
32
- loss_centerness=dict(
33
- type='CrossEntropyLoss',
34
- use_sigmoid=True,
35
- loss_weight=1.0),
36
- **kwargs):
37
- self.stacked_convs = stacked_convs
38
- self.conv_cfg = conv_cfg
39
- self.norm_cfg = norm_cfg
40
- super(ATSSHead, self).__init__(num_classes, in_channels, **kwargs)
41
-
42
- self.sampling = False
43
- if self.train_cfg:
44
- self.assigner = build_assigner(self.train_cfg.assigner)
45
- # SSD sampling=False so use PseudoSampler
46
- sampler_cfg = dict(type='PseudoSampler')
47
- self.sampler = build_sampler(sampler_cfg, context=self)
48
- self.loss_centerness = build_loss(loss_centerness)
49
-
50
- def _init_layers(self):
51
- """Initialize layers of the head."""
52
- self.relu = nn.ReLU(inplace=True)
53
- self.cls_convs = nn.ModuleList()
54
- self.reg_convs = nn.ModuleList()
55
- for i in range(self.stacked_convs):
56
- chn = self.in_channels if i == 0 else self.feat_channels
57
- self.cls_convs.append(
58
- ConvModule(
59
- chn,
60
- self.feat_channels,
61
- 3,
62
- stride=1,
63
- padding=1,
64
- conv_cfg=self.conv_cfg,
65
- norm_cfg=self.norm_cfg))
66
- self.reg_convs.append(
67
- ConvModule(
68
- chn,
69
- self.feat_channels,
70
- 3,
71
- stride=1,
72
- padding=1,
73
- conv_cfg=self.conv_cfg,
74
- norm_cfg=self.norm_cfg))
75
- self.atss_cls = nn.Conv2d(
76
- self.feat_channels,
77
- self.num_anchors * self.cls_out_channels,
78
- 3,
79
- padding=1)
80
- self.atss_reg = nn.Conv2d(
81
- self.feat_channels, self.num_anchors * 4, 3, padding=1)
82
- self.atss_centerness = nn.Conv2d(
83
- self.feat_channels, self.num_anchors * 1, 3, padding=1)
84
- self.scales = nn.ModuleList(
85
- [Scale(1.0) for _ in self.anchor_generator.strides])
86
-
87
- def init_weights(self):
88
- """Initialize weights of the head."""
89
- for m in self.cls_convs:
90
- normal_init(m.conv, std=0.01)
91
- for m in self.reg_convs:
92
- normal_init(m.conv, std=0.01)
93
- bias_cls = bias_init_with_prob(0.01)
94
- normal_init(self.atss_cls, std=0.01, bias=bias_cls)
95
- normal_init(self.atss_reg, std=0.01)
96
- normal_init(self.atss_centerness, std=0.01)
97
-
98
- def forward(self, feats):
99
- """Forward features from the upstream network.
100
-
101
- Args:
102
- feats (tuple[Tensor]): Features from the upstream network, each is
103
- a 4D-tensor.
104
-
105
- Returns:
106
- tuple: Usually a tuple of classification scores and bbox prediction
107
- cls_scores (list[Tensor]): Classification scores for all scale
108
- levels, each is a 4D-tensor, the channels number is
109
- num_anchors * num_classes.
110
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
111
- levels, each is a 4D-tensor, the channels number is
112
- num_anchors * 4.
113
- """
114
- return multi_apply(self.forward_single, feats, self.scales)
115
-
116
- def forward_single(self, x, scale):
117
- """Forward feature of a single scale level.
118
-
119
- Args:
120
- x (Tensor): Features of a single scale level.
121
- scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
122
- the bbox prediction.
123
-
124
- Returns:
125
- tuple:
126
- cls_score (Tensor): Cls scores for a single scale level
127
- the channels number is num_anchors * num_classes.
128
- bbox_pred (Tensor): Box energies / deltas for a single scale
129
- level, the channels number is num_anchors * 4.
130
- centerness (Tensor): Centerness for a single scale level, the
131
- channel number is (N, num_anchors * 1, H, W).
132
- """
133
- cls_feat = x
134
- reg_feat = x
135
- for cls_conv in self.cls_convs:
136
- cls_feat = cls_conv(cls_feat)
137
- for reg_conv in self.reg_convs:
138
- reg_feat = reg_conv(reg_feat)
139
- cls_score = self.atss_cls(cls_feat)
140
- # we just follow atss, not apply exp in bbox_pred
141
- bbox_pred = scale(self.atss_reg(reg_feat)).float()
142
- centerness = self.atss_centerness(reg_feat)
143
- return cls_score, bbox_pred, centerness
144
-
145
- def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
146
- label_weights, bbox_targets, num_total_samples):
147
- """Compute loss of a single scale level.
148
-
149
- Args:
150
- cls_score (Tensor): Box scores for each scale level
151
- Has shape (N, num_anchors * num_classes, H, W).
152
- bbox_pred (Tensor): Box energies / deltas for each scale
153
- level with shape (N, num_anchors * 4, H, W).
154
- anchors (Tensor): Box reference for each scale level with shape
155
- (N, num_total_anchors, 4).
156
- labels (Tensor): Labels of each anchors with shape
157
- (N, num_total_anchors).
158
- label_weights (Tensor): Label weights of each anchor with shape
159
- (N, num_total_anchors)
160
- bbox_targets (Tensor): BBox regression targets of each anchor wight
161
- shape (N, num_total_anchors, 4).
162
- num_total_samples (int): Number os positive samples that is
163
- reduced over all GPUs.
164
-
165
- Returns:
166
- dict[str, Tensor]: A dictionary of loss components.
167
- """
168
-
169
- anchors = anchors.reshape(-1, 4)
170
- cls_score = cls_score.permute(0, 2, 3, 1).reshape(
171
- -1, self.cls_out_channels).contiguous()
172
- bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
173
- centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
174
- bbox_targets = bbox_targets.reshape(-1, 4)
175
- labels = labels.reshape(-1)
176
- label_weights = label_weights.reshape(-1)
177
-
178
- # classification loss
179
- loss_cls = self.loss_cls(
180
- cls_score, labels, label_weights, avg_factor=num_total_samples)
181
-
182
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
183
- bg_class_ind = self.num_classes
184
- pos_inds = ((labels >= 0)
185
- & (labels < bg_class_ind)).nonzero().squeeze(1)
186
-
187
- if len(pos_inds) > 0:
188
- pos_bbox_targets = bbox_targets[pos_inds]
189
- pos_bbox_pred = bbox_pred[pos_inds]
190
- pos_anchors = anchors[pos_inds]
191
- pos_centerness = centerness[pos_inds]
192
-
193
- centerness_targets = self.centerness_target(
194
- pos_anchors, pos_bbox_targets)
195
- pos_decode_bbox_pred = self.bbox_coder.decode(
196
- pos_anchors, pos_bbox_pred)
197
- pos_decode_bbox_targets = self.bbox_coder.decode(
198
- pos_anchors, pos_bbox_targets)
199
-
200
- # regression loss
201
- loss_bbox = self.loss_bbox(
202
- pos_decode_bbox_pred,
203
- pos_decode_bbox_targets,
204
- weight=centerness_targets,
205
- avg_factor=1.0)
206
-
207
- # centerness loss
208
- loss_centerness = self.loss_centerness(
209
- pos_centerness,
210
- centerness_targets,
211
- avg_factor=num_total_samples)
212
-
213
- else:
214
- loss_bbox = bbox_pred.sum() * 0
215
- loss_centerness = centerness.sum() * 0
216
- centerness_targets = bbox_targets.new_tensor(0.)
217
-
218
- return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
219
-
220
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
221
- def loss(self,
222
- cls_scores,
223
- bbox_preds,
224
- centernesses,
225
- gt_bboxes,
226
- gt_labels,
227
- img_metas,
228
- gt_bboxes_ignore=None):
229
- """Compute losses of the head.
230
-
231
- Args:
232
- cls_scores (list[Tensor]): Box scores for each scale level
233
- Has shape (N, num_anchors * num_classes, H, W)
234
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
235
- level with shape (N, num_anchors * 4, H, W)
236
- centernesses (list[Tensor]): Centerness for each scale
237
- level with shape (N, num_anchors * 1, H, W)
238
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
239
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
240
- gt_labels (list[Tensor]): class indices corresponding to each box
241
- img_metas (list[dict]): Meta information of each image, e.g.,
242
- image size, scaling factor, etc.
243
- gt_bboxes_ignore (list[Tensor] | None): specify which bounding
244
- boxes can be ignored when computing the loss.
245
-
246
- Returns:
247
- dict[str, Tensor]: A dictionary of loss components.
248
- """
249
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
250
- assert len(featmap_sizes) == self.anchor_generator.num_levels
251
-
252
- device = cls_scores[0].device
253
- anchor_list, valid_flag_list = self.get_anchors(
254
- featmap_sizes, img_metas, device=device)
255
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
256
-
257
- cls_reg_targets = self.get_targets(
258
- anchor_list,
259
- valid_flag_list,
260
- gt_bboxes,
261
- img_metas,
262
- gt_bboxes_ignore_list=gt_bboxes_ignore,
263
- gt_labels_list=gt_labels,
264
- label_channels=label_channels)
265
- if cls_reg_targets is None:
266
- return None
267
-
268
- (anchor_list, labels_list, label_weights_list, bbox_targets_list,
269
- bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
270
-
271
- num_total_samples = reduce_mean(
272
- torch.tensor(num_total_pos, dtype=torch.float,
273
- device=device)).item()
274
- num_total_samples = max(num_total_samples, 1.0)
275
-
276
- losses_cls, losses_bbox, loss_centerness,\
277
- bbox_avg_factor = multi_apply(
278
- self.loss_single,
279
- anchor_list,
280
- cls_scores,
281
- bbox_preds,
282
- centernesses,
283
- labels_list,
284
- label_weights_list,
285
- bbox_targets_list,
286
- num_total_samples=num_total_samples)
287
-
288
- bbox_avg_factor = sum(bbox_avg_factor)
289
- bbox_avg_factor = reduce_mean(bbox_avg_factor).item()
290
- if bbox_avg_factor < EPS:
291
- bbox_avg_factor = 1
292
- losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
293
- return dict(
294
- loss_cls=losses_cls,
295
- loss_bbox=losses_bbox,
296
- loss_centerness=loss_centerness)
297
-
298
- def centerness_target(self, anchors, bbox_targets):
299
- # only calculate pos centerness targets, otherwise there may be nan
300
- gts = self.bbox_coder.decode(anchors, bbox_targets)
301
- anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
302
- anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
303
- l_ = anchors_cx - gts[:, 0]
304
- t_ = anchors_cy - gts[:, 1]
305
- r_ = gts[:, 2] - anchors_cx
306
- b_ = gts[:, 3] - anchors_cy
307
-
308
- left_right = torch.stack([l_, r_], dim=1)
309
- top_bottom = torch.stack([t_, b_], dim=1)
310
- centerness = torch.sqrt(
311
- (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
312
- (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
313
- assert not torch.isnan(centerness).any()
314
- return centerness
315
-
316
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
317
- def get_bboxes(self,
318
- cls_scores,
319
- bbox_preds,
320
- centernesses,
321
- img_metas,
322
- cfg=None,
323
- rescale=False,
324
- with_nms=True):
325
- """Transform network output for a batch into bbox predictions.
326
-
327
- Args:
328
- cls_scores (list[Tensor]): Box scores for each scale level
329
- with shape (N, num_anchors * num_classes, H, W).
330
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
331
- level with shape (N, num_anchors * 4, H, W).
332
- centernesses (list[Tensor]): Centerness for each scale level with
333
- shape (N, num_anchors * 1, H, W).
334
- img_metas (list[dict]): Meta information of each image, e.g.,
335
- image size, scaling factor, etc.
336
- cfg (mmcv.Config | None): Test / postprocessing configuration,
337
- if None, test_cfg would be used. Default: None.
338
- rescale (bool): If True, return boxes in original image space.
339
- Default: False.
340
- with_nms (bool): If True, do nms before return boxes.
341
- Default: True.
342
-
343
- Returns:
344
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
345
- The first item is an (n, 5) tensor, where 5 represent
346
- (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
347
- The shape of the second tensor in the tuple is (n,), and
348
- each element represents the class label of the corresponding
349
- box.
350
- """
351
- cfg = self.test_cfg if cfg is None else cfg
352
- assert len(cls_scores) == len(bbox_preds)
353
- num_levels = len(cls_scores)
354
- device = cls_scores[0].device
355
- featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
356
- mlvl_anchors = self.anchor_generator.grid_anchors(
357
- featmap_sizes, device=device)
358
-
359
- cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
360
- bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
361
- centerness_pred_list = [
362
- centernesses[i].detach() for i in range(num_levels)
363
- ]
364
- img_shapes = [
365
- img_metas[i]['img_shape'] for i in range(cls_scores[0].shape[0])
366
- ]
367
- scale_factors = [
368
- img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
369
- ]
370
- result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
371
- centerness_pred_list, mlvl_anchors,
372
- img_shapes, scale_factors, cfg, rescale,
373
- with_nms)
374
- return result_list
375
-
376
- def _get_bboxes(self,
377
- cls_scores,
378
- bbox_preds,
379
- centernesses,
380
- mlvl_anchors,
381
- img_shapes,
382
- scale_factors,
383
- cfg,
384
- rescale=False,
385
- with_nms=True):
386
- """Transform outputs for a single batch item into labeled boxes.
387
-
388
- Args:
389
- cls_scores (list[Tensor]): Box scores for a single scale level
390
- with shape (N, num_anchors * num_classes, H, W).
391
- bbox_preds (list[Tensor]): Box energies / deltas for a single
392
- scale level with shape (N, num_anchors * 4, H, W).
393
- centernesses (list[Tensor]): Centerness for a single scale level
394
- with shape (N, num_anchors * 1, H, W).
395
- mlvl_anchors (list[Tensor]): Box reference for a single scale level
396
- with shape (num_total_anchors, 4).
397
- img_shapes (list[tuple[int]]): Shape of the input image,
398
- list[(height, width, 3)].
399
- scale_factors (list[ndarray]): Scale factor of the image arrange as
400
- (w_scale, h_scale, w_scale, h_scale).
401
- cfg (mmcv.Config | None): Test / postprocessing configuration,
402
- if None, test_cfg would be used.
403
- rescale (bool): If True, return boxes in original image space.
404
- Default: False.
405
- with_nms (bool): If True, do nms before return boxes.
406
- Default: True.
407
-
408
- Returns:
409
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
410
- The first item is an (n, 5) tensor, where 5 represent
411
- (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
412
- The shape of the second tensor in the tuple is (n,), and
413
- each element represents the class label of the corresponding
414
- box.
415
- """
416
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
417
- device = cls_scores[0].device
418
- batch_size = cls_scores[0].shape[0]
419
- # convert to tensor to keep tracing
420
- nms_pre_tensor = torch.tensor(
421
- cfg.get('nms_pre', -1), device=device, dtype=torch.long)
422
- mlvl_bboxes = []
423
- mlvl_scores = []
424
- mlvl_centerness = []
425
- for cls_score, bbox_pred, centerness, anchors in zip(
426
- cls_scores, bbox_preds, centernesses, mlvl_anchors):
427
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
428
- scores = cls_score.permute(0, 2, 3, 1).reshape(
429
- batch_size, -1, self.cls_out_channels).sigmoid()
430
- centerness = centerness.permute(0, 2, 3,
431
- 1).reshape(batch_size,
432
- -1).sigmoid()
433
- bbox_pred = bbox_pred.permute(0, 2, 3,
434
- 1).reshape(batch_size, -1, 4)
435
-
436
- # Always keep topk op for dynamic input in onnx
437
- if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
438
- or scores.shape[-2] > nms_pre_tensor):
439
- from torch import _shape_as_tensor
440
- # keep shape as tensor and get k
441
- num_anchor = _shape_as_tensor(scores)[-2].to(device)
442
- nms_pre = torch.where(nms_pre_tensor < num_anchor,
443
- nms_pre_tensor, num_anchor)
444
-
445
- max_scores, _ = (scores * centerness[..., None]).max(-1)
446
- _, topk_inds = max_scores.topk(nms_pre)
447
- anchors = anchors[topk_inds, :]
448
- batch_inds = torch.arange(batch_size).view(
449
- -1, 1).expand_as(topk_inds).long()
450
- bbox_pred = bbox_pred[batch_inds, topk_inds, :]
451
- scores = scores[batch_inds, topk_inds, :]
452
- centerness = centerness[batch_inds, topk_inds]
453
- else:
454
- anchors = anchors.expand_as(bbox_pred)
455
-
456
- bboxes = self.bbox_coder.decode(
457
- anchors, bbox_pred, max_shape=img_shapes)
458
- mlvl_bboxes.append(bboxes)
459
- mlvl_scores.append(scores)
460
- mlvl_centerness.append(centerness)
461
-
462
- batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
463
- if rescale:
464
- batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
465
- scale_factors).unsqueeze(1)
466
- batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
467
- batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
468
-
469
- # Set max number of box to be feed into nms in deployment
470
- deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
471
- if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
472
- batch_mlvl_scores, _ = (
473
- batch_mlvl_scores *
474
- batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
475
- ).max(-1)
476
- _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
477
- batch_inds = torch.arange(batch_size).view(-1,
478
- 1).expand_as(topk_inds)
479
- batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
480
- batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
481
- batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
482
- topk_inds]
483
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
484
- # BG cat_id: num_class
485
- padding = batch_mlvl_scores.new_zeros(batch_size,
486
- batch_mlvl_scores.shape[1], 1)
487
- batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
488
-
489
- if with_nms:
490
- det_results = []
491
- for (mlvl_bboxes, mlvl_scores,
492
- mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
493
- batch_mlvl_centerness):
494
- det_bbox, det_label = multiclass_nms(
495
- mlvl_bboxes,
496
- mlvl_scores,
497
- cfg.score_thr,
498
- cfg.nms,
499
- cfg.max_per_img,
500
- score_factors=mlvl_centerness)
501
- det_results.append(tuple([det_bbox, det_label]))
502
- else:
503
- det_results = [
504
- tuple(mlvl_bs)
505
- for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
506
- batch_mlvl_centerness)
507
- ]
508
- return det_results
509
-
510
- def get_targets(self,
511
- anchor_list,
512
- valid_flag_list,
513
- gt_bboxes_list,
514
- img_metas,
515
- gt_bboxes_ignore_list=None,
516
- gt_labels_list=None,
517
- label_channels=1,
518
- unmap_outputs=True):
519
- """Get targets for ATSS head.
520
-
521
- This method is almost the same as `AnchorHead.get_targets()`. Besides
522
- returning the targets as the parent method does, it also returns the
523
- anchors as the first element of the returned tuple.
524
- """
525
- num_imgs = len(img_metas)
526
- assert len(anchor_list) == len(valid_flag_list) == num_imgs
527
-
528
- # anchor number of multi levels
529
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
530
- num_level_anchors_list = [num_level_anchors] * num_imgs
531
-
532
- # concat all level anchors and flags to a single tensor
533
- for i in range(num_imgs):
534
- assert len(anchor_list[i]) == len(valid_flag_list[i])
535
- anchor_list[i] = torch.cat(anchor_list[i])
536
- valid_flag_list[i] = torch.cat(valid_flag_list[i])
537
-
538
- # compute targets for each image
539
- if gt_bboxes_ignore_list is None:
540
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
541
- if gt_labels_list is None:
542
- gt_labels_list = [None for _ in range(num_imgs)]
543
- (all_anchors, all_labels, all_label_weights, all_bbox_targets,
544
- all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
545
- self._get_target_single,
546
- anchor_list,
547
- valid_flag_list,
548
- num_level_anchors_list,
549
- gt_bboxes_list,
550
- gt_bboxes_ignore_list,
551
- gt_labels_list,
552
- img_metas,
553
- label_channels=label_channels,
554
- unmap_outputs=unmap_outputs)
555
- # no valid anchors
556
- if any([labels is None for labels in all_labels]):
557
- return None
558
- # sampled anchors of all images
559
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
560
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
561
- # split targets to a list w.r.t. multiple levels
562
- anchors_list = images_to_levels(all_anchors, num_level_anchors)
563
- labels_list = images_to_levels(all_labels, num_level_anchors)
564
- label_weights_list = images_to_levels(all_label_weights,
565
- num_level_anchors)
566
- bbox_targets_list = images_to_levels(all_bbox_targets,
567
- num_level_anchors)
568
- bbox_weights_list = images_to_levels(all_bbox_weights,
569
- num_level_anchors)
570
- return (anchors_list, labels_list, label_weights_list,
571
- bbox_targets_list, bbox_weights_list, num_total_pos,
572
- num_total_neg)
573
-
574
- def _get_target_single(self,
575
- flat_anchors,
576
- valid_flags,
577
- num_level_anchors,
578
- gt_bboxes,
579
- gt_bboxes_ignore,
580
- gt_labels,
581
- img_meta,
582
- label_channels=1,
583
- unmap_outputs=True):
584
- """Compute regression, classification targets for anchors in a single
585
- image.
586
-
587
- Args:
588
- flat_anchors (Tensor): Multi-level anchors of the image, which are
589
- concatenated into a single tensor of shape (num_anchors ,4)
590
- valid_flags (Tensor): Multi level valid flags of the image,
591
- which are concatenated into a single tensor of
592
- shape (num_anchors,).
593
- num_level_anchors Tensor): Number of anchors of each scale level.
594
- gt_bboxes (Tensor): Ground truth bboxes of the image,
595
- shape (num_gts, 4).
596
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
597
- ignored, shape (num_ignored_gts, 4).
598
- gt_labels (Tensor): Ground truth labels of each box,
599
- shape (num_gts,).
600
- img_meta (dict): Meta info of the image.
601
- label_channels (int): Channel of label.
602
- unmap_outputs (bool): Whether to map outputs back to the original
603
- set of anchors.
604
-
605
- Returns:
606
- tuple: N is the number of total anchors in the image.
607
- labels (Tensor): Labels of all anchors in the image with shape
608
- (N,).
609
- label_weights (Tensor): Label weights of all anchor in the
610
- image with shape (N,).
611
- bbox_targets (Tensor): BBox targets of all anchors in the
612
- image with shape (N, 4).
613
- bbox_weights (Tensor): BBox weights of all anchors in the
614
- image with shape (N, 4)
615
- pos_inds (Tensor): Indices of positive anchor with shape
616
- (num_pos,).
617
- neg_inds (Tensor): Indices of negative anchor with shape
618
- (num_neg,).
619
- """
620
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
621
- img_meta['img_shape'][:2],
622
- self.train_cfg.allowed_border)
623
- if not inside_flags.any():
624
- return (None, ) * 7
625
- # assign gt and sample anchors
626
- anchors = flat_anchors[inside_flags, :]
627
-
628
- num_level_anchors_inside = self.get_num_level_anchors_inside(
629
- num_level_anchors, inside_flags)
630
- assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
631
- gt_bboxes, gt_bboxes_ignore,
632
- gt_labels)
633
-
634
- sampling_result = self.sampler.sample(assign_result, anchors,
635
- gt_bboxes)
636
-
637
- num_valid_anchors = anchors.shape[0]
638
- bbox_targets = torch.zeros_like(anchors)
639
- bbox_weights = torch.zeros_like(anchors)
640
- labels = anchors.new_full((num_valid_anchors, ),
641
- self.num_classes,
642
- dtype=torch.long)
643
- label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
644
-
645
- pos_inds = sampling_result.pos_inds
646
- neg_inds = sampling_result.neg_inds
647
- if len(pos_inds) > 0:
648
- if hasattr(self, 'bbox_coder'):
649
- pos_bbox_targets = self.bbox_coder.encode(
650
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
651
- else:
652
- # used in VFNetHead
653
- pos_bbox_targets = sampling_result.pos_gt_bboxes
654
- bbox_targets[pos_inds, :] = pos_bbox_targets
655
- bbox_weights[pos_inds, :] = 1.0
656
- if gt_labels is None:
657
- # Only rpn gives gt_labels as None
658
- # Foreground is the first class since v2.5.0
659
- labels[pos_inds] = 0
660
- else:
661
- labels[pos_inds] = gt_labels[
662
- sampling_result.pos_assigned_gt_inds]
663
- if self.train_cfg.pos_weight <= 0:
664
- label_weights[pos_inds] = 1.0
665
- else:
666
- label_weights[pos_inds] = self.train_cfg.pos_weight
667
- if len(neg_inds) > 0:
668
- label_weights[neg_inds] = 1.0
669
-
670
- # map up to original set of anchors
671
- if unmap_outputs:
672
- num_total_anchors = flat_anchors.size(0)
673
- anchors = unmap(anchors, num_total_anchors, inside_flags)
674
- labels = unmap(
675
- labels, num_total_anchors, inside_flags, fill=self.num_classes)
676
- label_weights = unmap(label_weights, num_total_anchors,
677
- inside_flags)
678
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
679
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
680
-
681
- return (anchors, labels, label_weights, bbox_targets, bbox_weights,
682
- pos_inds, neg_inds)
683
-
684
- def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
685
- split_inside_flags = torch.split(inside_flags, num_level_anchors)
686
- num_level_anchors_inside = [
687
- int(flags.sum()) for flags in split_inside_flags
688
- ]
689
- return num_level_anchors_inside
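A standalone sketch of the centerness target computed in centerness_target() above: given the distances l, t, r, b from an anchor centre to the ground-truth box sides, the target is sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))). This is a plain-Python illustration of that formula, not the batched torch code:

    import math

    def centerness(l, t, r, b):
        # ratio of short to long side distance, horizontally and vertically
        return math.sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b)))

    print(centerness(5, 5, 5, 5))   # 1.0 -- anchor centre coincides with the box centre
    print(centerness(1, 4, 9, 6))   # ~0.27 -- off-centre anchors get a lower target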
spaces/CVPR/WALT/mmdet/models/dense_heads/guided_anchor_head.py DELETED
@@ -1,860 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from mmcv.cnn import bias_init_with_prob, normal_init
4
- from mmcv.ops import DeformConv2d, MaskedConv2d
5
- from mmcv.runner import force_fp32
6
-
7
- from mmdet.core import (anchor_inside_flags, build_anchor_generator,
8
- build_assigner, build_bbox_coder, build_sampler,
9
- calc_region, images_to_levels, multi_apply,
10
- multiclass_nms, unmap)
11
- from ..builder import HEADS, build_loss
12
- from .anchor_head import AnchorHead
13
-
14
-
15
- class FeatureAdaption(nn.Module):
16
- """Feature Adaption Module.
17
-
18
- Feature Adaption Module is implemented based on DCN v1.
19
- It uses anchor shape prediction rather than feature map to
20
- predict offsets of deform conv layer.
21
-
22
- Args:
23
- in_channels (int): Number of channels in the input feature map.
24
- out_channels (int): Number of channels in the output feature map.
25
- kernel_size (int): Deformable conv kernel size.
26
- deform_groups (int): Deformable conv group size.
27
- """
28
-
29
- def __init__(self,
30
- in_channels,
31
- out_channels,
32
- kernel_size=3,
33
- deform_groups=4):
34
- super(FeatureAdaption, self).__init__()
35
- offset_channels = kernel_size * kernel_size * 2
36
- self.conv_offset = nn.Conv2d(
37
- 2, deform_groups * offset_channels, 1, bias=False)
38
- self.conv_adaption = DeformConv2d(
39
- in_channels,
40
- out_channels,
41
- kernel_size=kernel_size,
42
- padding=(kernel_size - 1) // 2,
43
- deform_groups=deform_groups)
44
- self.relu = nn.ReLU(inplace=True)
45
-
46
- def init_weights(self):
47
- normal_init(self.conv_offset, std=0.1)
48
- normal_init(self.conv_adaption, std=0.01)
49
-
50
- def forward(self, x, shape):
51
- offset = self.conv_offset(shape.detach())
52
- x = self.relu(self.conv_adaption(x, offset))
53
- return x
54
-
55
-
56
- @HEADS.register_module()
57
- class GuidedAnchorHead(AnchorHead):
58
- """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
59
-
60
- This GuidedAnchorHead will predict high-quality feature guided
61
- anchors and locations where anchors will be kept in inference.
62
- There are mainly 3 categories of bounding-boxes.
63
-
64
- - Sampled 9 pairs for target assignment. (approxes)
65
- - The square boxes where the predicted anchors are based on. (squares)
66
- - Guided anchors.
67
-
68
- Please refer to https://arxiv.org/abs/1901.03278 for more details.
69
-
70
- Args:
71
- num_classes (int): Number of classes.
72
- in_channels (int): Number of channels in the input feature map.
73
- feat_channels (int): Number of hidden channels.
74
- approx_anchor_generator (dict): Config dict for approx generator
75
- square_anchor_generator (dict): Config dict for square generator
76
- anchor_coder (dict): Config dict for anchor coder
77
- bbox_coder (dict): Config dict for bbox coder
78
- reg_decoded_bbox (bool): If true, the regression loss would be
79
- applied directly on decoded bounding boxes, converting both
80
- the predicted boxes and regression targets to absolute
81
- coordinates format. Default False. It should be `True` when
82
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
83
- deform_groups: (int): Group number of DCN in
84
- FeatureAdaption module.
85
- loc_filter_thr (float): Threshold to filter out unconcerned regions.
86
- loss_loc (dict): Config of location loss.
87
- loss_shape (dict): Config of anchor shape loss.
88
- loss_cls (dict): Config of classification loss.
89
- loss_bbox (dict): Config of bbox regression loss.
90
- """
91
-
92
- def __init__(
93
- self,
94
- num_classes,
95
- in_channels,
96
- feat_channels=256,
97
- approx_anchor_generator=dict(
98
- type='AnchorGenerator',
99
- octave_base_scale=8,
100
- scales_per_octave=3,
101
- ratios=[0.5, 1.0, 2.0],
102
- strides=[4, 8, 16, 32, 64]),
103
- square_anchor_generator=dict(
104
- type='AnchorGenerator',
105
- ratios=[1.0],
106
- scales=[8],
107
- strides=[4, 8, 16, 32, 64]),
108
- anchor_coder=dict(
109
- type='DeltaXYWHBBoxCoder',
110
- target_means=[.0, .0, .0, .0],
111
- target_stds=[1.0, 1.0, 1.0, 1.0]
112
- ),
113
- bbox_coder=dict(
114
- type='DeltaXYWHBBoxCoder',
115
- target_means=[.0, .0, .0, .0],
116
- target_stds=[1.0, 1.0, 1.0, 1.0]
117
- ),
118
- reg_decoded_bbox=False,
119
- deform_groups=4,
120
- loc_filter_thr=0.01,
121
- train_cfg=None,
122
- test_cfg=None,
123
- loss_loc=dict(
124
- type='FocalLoss',
125
- use_sigmoid=True,
126
- gamma=2.0,
127
- alpha=0.25,
128
- loss_weight=1.0),
129
- loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
130
- loss_cls=dict(
131
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
132
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
133
- loss_weight=1.0)): # yapf: disable
134
- super(AnchorHead, self).__init__()
135
- self.in_channels = in_channels
136
- self.num_classes = num_classes
137
- self.feat_channels = feat_channels
138
- self.deform_groups = deform_groups
139
- self.loc_filter_thr = loc_filter_thr
140
-
141
- # build approx_anchor_generator and square_anchor_generator
142
- assert (approx_anchor_generator['octave_base_scale'] ==
143
- square_anchor_generator['scales'][0])
144
- assert (approx_anchor_generator['strides'] ==
145
- square_anchor_generator['strides'])
146
- self.approx_anchor_generator = build_anchor_generator(
147
- approx_anchor_generator)
148
- self.square_anchor_generator = build_anchor_generator(
149
- square_anchor_generator)
150
- self.approxs_per_octave = self.approx_anchor_generator \
151
- .num_base_anchors[0]
152
-
153
- self.reg_decoded_bbox = reg_decoded_bbox
154
-
155
- # one anchor per location
156
- self.num_anchors = 1
157
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
158
- self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
159
- self.sampling = loss_cls['type'] not in ['FocalLoss']
160
- self.ga_sampling = train_cfg is not None and hasattr(
161
- train_cfg, 'ga_sampler')
162
- if self.use_sigmoid_cls:
163
- self.cls_out_channels = self.num_classes
164
- else:
165
- self.cls_out_channels = self.num_classes + 1
166
-
167
- # build bbox_coder
168
- self.anchor_coder = build_bbox_coder(anchor_coder)
169
- self.bbox_coder = build_bbox_coder(bbox_coder)
170
-
171
- # build losses
172
- self.loss_loc = build_loss(loss_loc)
173
- self.loss_shape = build_loss(loss_shape)
174
- self.loss_cls = build_loss(loss_cls)
175
- self.loss_bbox = build_loss(loss_bbox)
176
-
177
- self.train_cfg = train_cfg
178
- self.test_cfg = test_cfg
179
-
180
- if self.train_cfg:
181
- self.assigner = build_assigner(self.train_cfg.assigner)
182
- # use PseudoSampler when sampling is False
183
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
184
- sampler_cfg = self.train_cfg.sampler
185
- else:
186
- sampler_cfg = dict(type='PseudoSampler')
187
- self.sampler = build_sampler(sampler_cfg, context=self)
188
-
189
- self.ga_assigner = build_assigner(self.train_cfg.ga_assigner)
190
- if self.ga_sampling:
191
- ga_sampler_cfg = self.train_cfg.ga_sampler
192
- else:
193
- ga_sampler_cfg = dict(type='PseudoSampler')
194
- self.ga_sampler = build_sampler(ga_sampler_cfg, context=self)
195
-
196
- self.fp16_enabled = False
197
-
198
- self._init_layers()
199
-
200
- def _init_layers(self):
201
- self.relu = nn.ReLU(inplace=True)
202
- self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)
203
- self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1)
204
- self.feature_adaption = FeatureAdaption(
205
- self.in_channels,
206
- self.feat_channels,
207
- kernel_size=3,
208
- deform_groups=self.deform_groups)
209
- self.conv_cls = MaskedConv2d(self.feat_channels,
210
- self.num_anchors * self.cls_out_channels,
211
- 1)
212
- self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
213
- 1)
214
-
215
- def init_weights(self):
216
- normal_init(self.conv_cls, std=0.01)
217
- normal_init(self.conv_reg, std=0.01)
218
-
219
- bias_cls = bias_init_with_prob(0.01)
220
- normal_init(self.conv_loc, std=0.01, bias=bias_cls)
221
- normal_init(self.conv_shape, std=0.01)
222
-
223
- self.feature_adaption.init_weights()
224
-
225
- def forward_single(self, x):
226
- loc_pred = self.conv_loc(x)
227
- shape_pred = self.conv_shape(x)
228
- x = self.feature_adaption(x, shape_pred)
229
- # masked conv is only used during inference for speed-up
230
- if not self.training:
231
- mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
232
- else:
233
- mask = None
234
- cls_score = self.conv_cls(x, mask)
235
- bbox_pred = self.conv_reg(x, mask)
236
- return cls_score, bbox_pred, shape_pred, loc_pred
237
-
238
- def forward(self, feats):
239
- return multi_apply(self.forward_single, feats)
240
-
241
- def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
242
- """Get sampled approxs and inside flags according to feature map sizes.
243
-
244
- Args:
245
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
246
- img_metas (list[dict]): Image meta info.
247
- device (torch.device | str): device for returned tensors
248
-
249
- Returns:
250
- tuple: approxes of each image, inside flags of each image
251
- """
252
- num_imgs = len(img_metas)
253
-
254
- # since feature map sizes of all images are the same, we only compute
255
- # approxes for one time
256
- multi_level_approxs = self.approx_anchor_generator.grid_anchors(
257
- featmap_sizes, device=device)
258
- approxs_list = [multi_level_approxs for _ in range(num_imgs)]
259
-
260
- # for each image, we compute inside flags of multi level approxes
261
- inside_flag_list = []
262
- for img_id, img_meta in enumerate(img_metas):
263
- multi_level_flags = []
264
- multi_level_approxs = approxs_list[img_id]
265
-
266
- # obtain valid flags for each approx first
267
- multi_level_approx_flags = self.approx_anchor_generator \
268
- .valid_flags(featmap_sizes,
269
- img_meta['pad_shape'],
270
- device=device)
271
-
272
- for i, flags in enumerate(multi_level_approx_flags):
273
- approxs = multi_level_approxs[i]
274
- inside_flags_list = []
275
- for i in range(self.approxs_per_octave):
276
- split_valid_flags = flags[i::self.approxs_per_octave]
277
- split_approxs = approxs[i::self.approxs_per_octave, :]
278
- inside_flags = anchor_inside_flags(
279
- split_approxs, split_valid_flags,
280
- img_meta['img_shape'][:2],
281
- self.train_cfg.allowed_border)
282
- inside_flags_list.append(inside_flags)
283
- # inside_flag for a position is true if any anchor in this
284
- # position is true
285
- inside_flags = (
286
- torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
287
- multi_level_flags.append(inside_flags)
288
- inside_flag_list.append(multi_level_flags)
289
- return approxs_list, inside_flag_list
290
-
291
- def get_anchors(self,
292
- featmap_sizes,
293
- shape_preds,
294
- loc_preds,
295
- img_metas,
296
- use_loc_filter=False,
297
- device='cuda'):
298
- """Get squares according to feature map sizes and guided anchors.
299
-
300
- Args:
301
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
302
- shape_preds (list[tensor]): Multi-level shape predictions.
303
- loc_preds (list[tensor]): Multi-level location predictions.
304
- img_metas (list[dict]): Image meta info.
305
- use_loc_filter (bool): Use loc filter or not.
306
- device (torch.device | str): device for returned tensors
307
-
308
- Returns:
309
- tuple: square approxs of each image, guided anchors of each image,
310
- loc masks of each image
311
- """
312
- num_imgs = len(img_metas)
313
- num_levels = len(featmap_sizes)
314
-
315
- # since feature map sizes of all images are the same, we only compute
316
- # squares for one time
317
- multi_level_squares = self.square_anchor_generator.grid_anchors(
318
- featmap_sizes, device=device)
319
- squares_list = [multi_level_squares for _ in range(num_imgs)]
320
-
321
- # for each image, we compute multi level guided anchors
322
- guided_anchors_list = []
323
- loc_mask_list = []
324
- for img_id, img_meta in enumerate(img_metas):
325
- multi_level_guided_anchors = []
326
- multi_level_loc_mask = []
327
- for i in range(num_levels):
328
- squares = squares_list[img_id][i]
329
- shape_pred = shape_preds[i][img_id]
330
- loc_pred = loc_preds[i][img_id]
331
- guided_anchors, loc_mask = self._get_guided_anchors_single(
332
- squares,
333
- shape_pred,
334
- loc_pred,
335
- use_loc_filter=use_loc_filter)
336
- multi_level_guided_anchors.append(guided_anchors)
337
- multi_level_loc_mask.append(loc_mask)
338
- guided_anchors_list.append(multi_level_guided_anchors)
339
- loc_mask_list.append(multi_level_loc_mask)
340
- return squares_list, guided_anchors_list, loc_mask_list
341
-
342
- def _get_guided_anchors_single(self,
343
- squares,
344
- shape_pred,
345
- loc_pred,
346
- use_loc_filter=False):
347
- """Get guided anchors and loc masks for a single level.
348
-
349
- Args:
350
- squares (tensor): Squares of a single level.
351
- shape_pred (tensor): Shape predictions of a single level.
352
- loc_pred (tensor): Loc predictions of a single level.
353
- use_loc_filter (bool): Use loc filter or not.
354
-
355
- Returns:
356
- tuple: guided anchors, location masks
357
- """
358
- # calculate location filtering mask
359
- loc_pred = loc_pred.sigmoid().detach()
360
- if use_loc_filter:
361
- loc_mask = loc_pred >= self.loc_filter_thr
362
- else:
363
- loc_mask = loc_pred >= 0.0
364
- mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
365
- mask = mask.contiguous().view(-1)
366
- # calculate guided anchors
367
- squares = squares[mask]
368
- anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
369
- -1, 2).detach()[mask]
370
- bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
371
- bbox_deltas[:, 2:] = anchor_deltas
372
- guided_anchors = self.anchor_coder.decode(
373
- squares, bbox_deltas, wh_ratio_clip=1e-6)
374
- return guided_anchors, mask
375
-
376
- def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
377
- """Compute location targets for guided anchoring.
378
-
379
- Each feature map is divided into positive, negative and ignore regions.
380
- - positive regions: target 1, weight 1
381
- - ignore regions: target 0, weight 0
382
- - negative regions: target 0, weight 0.1
383
-
384
- Args:
385
- gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
386
- featmap_sizes (list[tuple]): Multi level sizes of each feature
387
- maps.
388
-
389
- Returns:
390
- tuple
391
- """
392
- anchor_scale = self.approx_anchor_generator.octave_base_scale
393
- anchor_strides = self.approx_anchor_generator.strides
394
- # Currently only supports same stride in x and y direction.
395
- for stride in anchor_strides:
396
- assert (stride[0] == stride[1])
397
- anchor_strides = [stride[0] for stride in anchor_strides]
398
-
399
- center_ratio = self.train_cfg.center_ratio
400
- ignore_ratio = self.train_cfg.ignore_ratio
401
- img_per_gpu = len(gt_bboxes_list)
402
- num_lvls = len(featmap_sizes)
403
- r1 = (1 - center_ratio) / 2
404
- r2 = (1 - ignore_ratio) / 2
405
- all_loc_targets = []
406
- all_loc_weights = []
407
- all_ignore_map = []
408
- for lvl_id in range(num_lvls):
409
- h, w = featmap_sizes[lvl_id]
410
- loc_targets = torch.zeros(
411
- img_per_gpu,
412
- 1,
413
- h,
414
- w,
415
- device=gt_bboxes_list[0].device,
416
- dtype=torch.float32)
417
- loc_weights = torch.full_like(loc_targets, -1)
418
- ignore_map = torch.zeros_like(loc_targets)
419
- all_loc_targets.append(loc_targets)
420
- all_loc_weights.append(loc_weights)
421
- all_ignore_map.append(ignore_map)
422
- for img_id in range(img_per_gpu):
423
- gt_bboxes = gt_bboxes_list[img_id]
424
- scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
425
- (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
426
- min_anchor_size = scale.new_full(
427
- (1, ), float(anchor_scale * anchor_strides[0]))
428
- # assign gt bboxes to different feature levels w.r.t. their scales
429
- target_lvls = torch.floor(
430
- torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
431
- target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
432
- for gt_id in range(gt_bboxes.size(0)):
433
- lvl = target_lvls[gt_id].item()
434
- # rescaled to corresponding feature map
435
- gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
436
- # calculate ignore regions
437
- ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
438
- gt_, r2, featmap_sizes[lvl])
439
- # calculate positive (center) regions
440
- ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
441
- gt_, r1, featmap_sizes[lvl])
442
- all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
443
- ctr_x1:ctr_x2 + 1] = 1
444
- all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
445
- ignore_x1:ignore_x2 + 1] = 0
446
- all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
447
- ctr_x1:ctr_x2 + 1] = 1
448
- # calculate ignore map on nearby low level feature
449
- if lvl > 0:
450
- d_lvl = lvl - 1
451
- # rescaled to corresponding feature map
452
- gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
453
- ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
454
- gt_, r2, featmap_sizes[d_lvl])
455
- all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
456
- ignore_x1:ignore_x2 + 1] = 1
457
- # calculate ignore map on nearby high level feature
458
- if lvl < num_lvls - 1:
459
- u_lvl = lvl + 1
460
- # rescaled to corresponding feature map
461
- gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
462
- ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
463
- gt_, r2, featmap_sizes[u_lvl])
464
- all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
465
- ignore_x1:ignore_x2 + 1] = 1
466
- for lvl_id in range(num_lvls):
467
- # ignore negative regions w.r.t. ignore map
468
- all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
469
- & (all_ignore_map[lvl_id] > 0)] = 0
470
- # set negative regions with weight 0.1
471
- all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
472
- # loc average factor to balance loss
473
- loc_avg_factor = sum(
474
- [t.size(0) * t.size(-1) * t.size(-2)
475
- for t in all_loc_targets]) / 200
476
- return all_loc_targets, all_loc_weights, loc_avg_factor
477
-
478
- def _ga_shape_target_single(self,
479
- flat_approxs,
480
- inside_flags,
481
- flat_squares,
482
- gt_bboxes,
483
- gt_bboxes_ignore,
484
- img_meta,
485
- unmap_outputs=True):
486
- """Compute guided anchoring targets.
487
-
488
- This function returns sampled anchors and gt bboxes directly
489
- rather than calculates regression targets.
490
-
491
- Args:
492
- flat_approxs (Tensor): flat approxs of a single image,
493
- shape (n, 4)
494
- inside_flags (Tensor): inside flags of a single image,
495
- shape (n, ).
496
- flat_squares (Tensor): flat squares of a single image,
497
- shape (approxs_per_octave * n, 4)
498
- gt_bboxes (Tensor): Ground truth bboxes of a single image.
499
- img_meta (dict): Meta info of a single image.
500
- approxs_per_octave (int): number of approxs per octave
501
- cfg (dict): RPN train configs.
502
- unmap_outputs (bool): unmap outputs or not.
503
-
504
- Returns:
505
- tuple
506
- """
507
- if not inside_flags.any():
508
- return (None, ) * 5
509
- # assign gt and sample anchors
510
- expand_inside_flags = inside_flags[:, None].expand(
511
- -1, self.approxs_per_octave).reshape(-1)
512
- approxs = flat_approxs[expand_inside_flags, :]
513
- squares = flat_squares[inside_flags, :]
514
-
515
- assign_result = self.ga_assigner.assign(approxs, squares,
516
- self.approxs_per_octave,
517
- gt_bboxes, gt_bboxes_ignore)
518
- sampling_result = self.ga_sampler.sample(assign_result, squares,
519
- gt_bboxes)
520
-
521
- bbox_anchors = torch.zeros_like(squares)
522
- bbox_gts = torch.zeros_like(squares)
523
- bbox_weights = torch.zeros_like(squares)
524
-
525
- pos_inds = sampling_result.pos_inds
526
- neg_inds = sampling_result.neg_inds
527
- if len(pos_inds) > 0:
528
- bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
529
- bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
530
- bbox_weights[pos_inds, :] = 1.0
531
-
532
- # map up to original set of anchors
533
- if unmap_outputs:
534
- num_total_anchors = flat_squares.size(0)
535
- bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
536
- bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
537
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
538
-
539
- return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
540
-
541
- def ga_shape_targets(self,
542
- approx_list,
543
- inside_flag_list,
544
- square_list,
545
- gt_bboxes_list,
546
- img_metas,
547
- gt_bboxes_ignore_list=None,
548
- unmap_outputs=True):
549
- """Compute guided anchoring targets.
550
-
551
- Args:
552
- approx_list (list[list]): Multi level approxs of each image.
553
- inside_flag_list (list[list]): Multi level inside flags of each
554
- image.
555
- square_list (list[list]): Multi level squares of each image.
556
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
557
- img_metas (list[dict]): Meta info of each image.
558
- gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
559
- unmap_outputs (bool): unmap outputs or not.
560
-
561
- Returns:
562
- tuple
563
- """
564
- num_imgs = len(img_metas)
565
- assert len(approx_list) == len(inside_flag_list) == len(
566
- square_list) == num_imgs
567
- # anchor number of multi levels
568
- num_level_squares = [squares.size(0) for squares in square_list[0]]
569
- # concat all level anchors and flags to a single tensor
570
- inside_flag_flat_list = []
571
- approx_flat_list = []
572
- square_flat_list = []
573
- for i in range(num_imgs):
574
- assert len(square_list[i]) == len(inside_flag_list[i])
575
- inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
576
- approx_flat_list.append(torch.cat(approx_list[i]))
577
- square_flat_list.append(torch.cat(square_list[i]))
578
-
579
- # compute targets for each image
580
- if gt_bboxes_ignore_list is None:
581
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
582
- (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
583
- neg_inds_list) = multi_apply(
584
- self._ga_shape_target_single,
585
- approx_flat_list,
586
- inside_flag_flat_list,
587
- square_flat_list,
588
- gt_bboxes_list,
589
- gt_bboxes_ignore_list,
590
- img_metas,
591
- unmap_outputs=unmap_outputs)
592
- # no valid anchors
593
- if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
594
- return None
595
- # sampled anchors of all images
596
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
597
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
598
- # split targets to a list w.r.t. multiple levels
599
- bbox_anchors_list = images_to_levels(all_bbox_anchors,
600
- num_level_squares)
601
- bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
602
- bbox_weights_list = images_to_levels(all_bbox_weights,
603
- num_level_squares)
604
- return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,
605
- num_total_pos, num_total_neg)
606
-
607
- def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
608
- anchor_weights, anchor_total_num):
609
- shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
610
- bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
611
- bbox_gts = bbox_gts.contiguous().view(-1, 4)
612
- anchor_weights = anchor_weights.contiguous().view(-1, 4)
613
- bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
614
- bbox_deltas[:, 2:] += shape_pred
615
- # filter out negative samples to speed-up weighted_bounded_iou_loss
616
- inds = torch.nonzero(
617
- anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)
618
- bbox_deltas_ = bbox_deltas[inds]
619
- bbox_anchors_ = bbox_anchors[inds]
620
- bbox_gts_ = bbox_gts[inds]
621
- anchor_weights_ = anchor_weights[inds]
622
- pred_anchors_ = self.anchor_coder.decode(
623
- bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)
624
- loss_shape = self.loss_shape(
625
- pred_anchors_,
626
- bbox_gts_,
627
- anchor_weights_,
628
- avg_factor=anchor_total_num)
629
- return loss_shape
630
-
631
- def loss_loc_single(self, loc_pred, loc_target, loc_weight,
632
- loc_avg_factor):
633
- loss_loc = self.loss_loc(
634
- loc_pred.reshape(-1, 1),
635
- loc_target.reshape(-1).long(),
636
- loc_weight.reshape(-1),
637
- avg_factor=loc_avg_factor)
638
- return loss_loc
639
-
640
- @force_fp32(
641
- apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
642
- def loss(self,
643
- cls_scores,
644
- bbox_preds,
645
- shape_preds,
646
- loc_preds,
647
- gt_bboxes,
648
- gt_labels,
649
- img_metas,
650
- gt_bboxes_ignore=None):
651
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
652
- assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
653
-
654
- device = cls_scores[0].device
655
-
656
- # get loc targets
657
- loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(
658
- gt_bboxes, featmap_sizes)
659
-
660
- # get sampled approxes
661
- approxs_list, inside_flag_list = self.get_sampled_approxs(
662
- featmap_sizes, img_metas, device=device)
663
- # get squares and guided anchors
664
- squares_list, guided_anchors_list, _ = self.get_anchors(
665
- featmap_sizes, shape_preds, loc_preds, img_metas, device=device)
666
-
667
- # get shape targets
668
- shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,
669
- squares_list, gt_bboxes,
670
- img_metas)
671
- if shape_targets is None:
672
- return None
673
- (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
674
- anchor_bg_num) = shape_targets
675
- anchor_total_num = (
676
- anchor_fg_num if not self.ga_sampling else anchor_fg_num +
677
- anchor_bg_num)
678
-
679
- # get anchor targets
680
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
681
- cls_reg_targets = self.get_targets(
682
- guided_anchors_list,
683
- inside_flag_list,
684
- gt_bboxes,
685
- img_metas,
686
- gt_bboxes_ignore_list=gt_bboxes_ignore,
687
- gt_labels_list=gt_labels,
688
- label_channels=label_channels)
689
- if cls_reg_targets is None:
690
- return None
691
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
692
- num_total_pos, num_total_neg) = cls_reg_targets
693
- num_total_samples = (
694
- num_total_pos + num_total_neg if self.sampling else num_total_pos)
695
-
696
- # anchor number of multi levels
697
- num_level_anchors = [
698
- anchors.size(0) for anchors in guided_anchors_list[0]
699
- ]
700
- # concat all level anchors to a single tensor
701
- concat_anchor_list = []
702
- for i in range(len(guided_anchors_list)):
703
- concat_anchor_list.append(torch.cat(guided_anchors_list[i]))
704
- all_anchor_list = images_to_levels(concat_anchor_list,
705
- num_level_anchors)
706
-
707
- # get classification and bbox regression losses
708
- losses_cls, losses_bbox = multi_apply(
709
- self.loss_single,
710
- cls_scores,
711
- bbox_preds,
712
- all_anchor_list,
713
- labels_list,
714
- label_weights_list,
715
- bbox_targets_list,
716
- bbox_weights_list,
717
- num_total_samples=num_total_samples)
718
-
719
- # get anchor location loss
720
- losses_loc = []
721
- for i in range(len(loc_preds)):
722
- loss_loc = self.loss_loc_single(
723
- loc_preds[i],
724
- loc_targets[i],
725
- loc_weights[i],
726
- loc_avg_factor=loc_avg_factor)
727
- losses_loc.append(loss_loc)
728
-
729
- # get anchor shape loss
730
- losses_shape = []
731
- for i in range(len(shape_preds)):
732
- loss_shape = self.loss_shape_single(
733
- shape_preds[i],
734
- bbox_anchors_list[i],
735
- bbox_gts_list[i],
736
- anchor_weights_list[i],
737
- anchor_total_num=anchor_total_num)
738
- losses_shape.append(loss_shape)
739
-
740
- return dict(
741
- loss_cls=losses_cls,
742
- loss_bbox=losses_bbox,
743
- loss_shape=losses_shape,
744
- loss_loc=losses_loc)
745
-
746
- @force_fp32(
747
- apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
748
- def get_bboxes(self,
749
- cls_scores,
750
- bbox_preds,
751
- shape_preds,
752
- loc_preds,
753
- img_metas,
754
- cfg=None,
755
- rescale=False):
756
- assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
757
- loc_preds)
758
- num_levels = len(cls_scores)
759
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
760
- device = cls_scores[0].device
761
- # get guided anchors
762
- _, guided_anchors, loc_masks = self.get_anchors(
763
- featmap_sizes,
764
- shape_preds,
765
- loc_preds,
766
- img_metas,
767
- use_loc_filter=not self.training,
768
- device=device)
769
- result_list = []
770
- for img_id in range(len(img_metas)):
771
- cls_score_list = [
772
- cls_scores[i][img_id].detach() for i in range(num_levels)
773
- ]
774
- bbox_pred_list = [
775
- bbox_preds[i][img_id].detach() for i in range(num_levels)
776
- ]
777
- guided_anchor_list = [
778
- guided_anchors[img_id][i].detach() for i in range(num_levels)
779
- ]
780
- loc_mask_list = [
781
- loc_masks[img_id][i].detach() for i in range(num_levels)
782
- ]
783
- img_shape = img_metas[img_id]['img_shape']
784
- scale_factor = img_metas[img_id]['scale_factor']
785
- proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
786
- guided_anchor_list,
787
- loc_mask_list, img_shape,
788
- scale_factor, cfg, rescale)
789
- result_list.append(proposals)
790
- return result_list
791
-
792
- def _get_bboxes_single(self,
793
- cls_scores,
794
- bbox_preds,
795
- mlvl_anchors,
796
- mlvl_masks,
797
- img_shape,
798
- scale_factor,
799
- cfg,
800
- rescale=False):
801
- cfg = self.test_cfg if cfg is None else cfg
802
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
803
- mlvl_bboxes = []
804
- mlvl_scores = []
805
- for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
806
- mlvl_anchors,
807
- mlvl_masks):
808
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
809
- # if no location is kept, end.
810
- if mask.sum() == 0:
811
- continue
812
- # reshape scores and bbox_pred
813
- cls_score = cls_score.permute(1, 2,
814
- 0).reshape(-1, self.cls_out_channels)
815
- if self.use_sigmoid_cls:
816
- scores = cls_score.sigmoid()
817
- else:
818
- scores = cls_score.softmax(-1)
819
- bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
820
- # filter scores, bbox_pred w.r.t. mask.
821
- # anchors are filtered in get_anchors() beforehand.
822
- scores = scores[mask, :]
823
- bbox_pred = bbox_pred[mask, :]
824
- if scores.dim() == 0:
825
- anchors = anchors.unsqueeze(0)
826
- scores = scores.unsqueeze(0)
827
- bbox_pred = bbox_pred.unsqueeze(0)
828
- # filter anchors, bbox_pred, scores w.r.t. scores
829
- nms_pre = cfg.get('nms_pre', -1)
830
- if nms_pre > 0 and scores.shape[0] > nms_pre:
831
- if self.use_sigmoid_cls:
832
- max_scores, _ = scores.max(dim=1)
833
- else:
834
- # remind that we set FG labels to [0, num_class-1]
835
- # since mmdet v2.0
836
- # BG cat_id: num_class
837
- max_scores, _ = scores[:, :-1].max(dim=1)
838
- _, topk_inds = max_scores.topk(nms_pre)
839
- anchors = anchors[topk_inds, :]
840
- bbox_pred = bbox_pred[topk_inds, :]
841
- scores = scores[topk_inds, :]
842
- bboxes = self.bbox_coder.decode(
843
- anchors, bbox_pred, max_shape=img_shape)
844
- mlvl_bboxes.append(bboxes)
845
- mlvl_scores.append(scores)
846
- mlvl_bboxes = torch.cat(mlvl_bboxes)
847
- if rescale:
848
- mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
849
- mlvl_scores = torch.cat(mlvl_scores)
850
- if self.use_sigmoid_cls:
851
- # Add a dummy background class to the backend when using sigmoid
852
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
853
- # BG cat_id: num_class
854
- padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
855
- mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
856
- # multi class NMS
857
- det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
858
- cfg.score_thr, cfg.nms,
859
- cfg.max_per_img)
860
- return det_bboxes, det_labels
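For orientation on the deleted head above: `_get_guided_anchors_single` turns each kept square anchor plus a predicted (dw, dh) pair into a guided anchor by rescaling the square's width and height around its unchanged centre (a DeltaXYWHBBoxCoder decode with zero centre deltas and unit stds). A small self-contained sketch of that decode step, ignoring the wh_ratio_clip clamp and using made-up numbers:

import torch

def decode_guided_anchor(squares, dwh):
    # squares: (N, 4) boxes (x1, y1, x2, y2); dwh: (N, 2) predicted (dw, dh)
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    w = squares[:, 2] - squares[:, 0]
    h = squares[:, 3] - squares[:, 1]
    new_w = w * dwh[:, 0].exp()  # width scaled by exp(dw)
    new_h = h * dwh[:, 1].exp()  # height scaled by exp(dh)
    return torch.stack([cx - new_w * 0.5, cy - new_h * 0.5,
                        cx + new_w * 0.5, cy + new_h * 0.5], dim=-1)

square = torch.tensor([[0., 0., 32., 32.]])  # one 32x32 square anchor
delta = torch.tensor([[0.5, -0.5]])          # predicted (dw, dh) for that location
print(decode_guided_anchor(square, delta))
# ~[[-10.4, 6.3, 42.4, 25.7]]: wider and shorter than the square, same centre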
spaces/CVPR/lama-example/bin/paper_runfiles/generate_test_ffhq.sh DELETED
@@ -1,17 +0,0 @@
- #!/usr/bin/env bash
- 
- # paths to data are valid for mml-ws01
- OUT_DIR="/media/inpainting/paper_data/FFHQ_val"
- 
- source "$(dirname $0)/env.sh"
- 
- for datadir in test
- do
-     for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512
-     do
-         "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-ffhq \
-             location.out_dir=$OUT_DIR cropping.out_square_crop=False
- 
-         "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-     done
- done
spaces/ClassCat/Medical-Image-Classification-with-MONAI/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Medical Image Classification With MONAI
- emoji: 🔥
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
- pinned: True
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CofAI/chat.b4/client/css/buttons.css DELETED
@@ -1,4 +0,0 @@
- .buttons {
-     display: flex;
-     justify-content: left;
- }
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/qu2cu/benchmark.py DELETED
@@ -1,57 +0,0 @@
- """Benchmark the qu2cu algorithm performance."""
- 
- from .qu2cu import *
- from fontTools.cu2qu import curve_to_quadratic
- import random
- import timeit
- 
- MAX_ERR = 0.5
- NUM_CURVES = 5
- 
- 
- def generate_curves(n):
-     points = [
-         tuple(float(random.randint(0, 2048)) for coord in range(2))
-         for point in range(1 + 3 * n)
-     ]
-     curves = []
-     for i in range(n):
-         curves.append(tuple(points[i * 3 : i * 3 + 4]))
-     return curves
- 
- 
- def setup_quadratic_to_curves():
-     curves = generate_curves(NUM_CURVES)
-     quadratics = [curve_to_quadratic(curve, MAX_ERR) for curve in curves]
-     return quadratics, MAX_ERR
- 
- 
- def run_benchmark(module, function, setup_suffix="", repeat=25, number=1):
-     setup_func = "setup_" + function
-     if setup_suffix:
-         print("%s with %s:" % (function, setup_suffix), end="")
-         setup_func += "_" + setup_suffix
-     else:
-         print("%s:" % function, end="")
- 
-     def wrapper(function, setup_func):
-         function = globals()[function]
-         setup_func = globals()[setup_func]
- 
-         def wrapped():
-             return function(*setup_func())
- 
-         return wrapped
- 
-     results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
-     print("\t%5.1fus" % (min(results) * 1000000.0 / number))
- 
- 
- def main():
-     """Benchmark the qu2cu algorithm performance."""
-     run_benchmark("qu2cu", "quadratic_to_curves")
- 
- 
- if __name__ == "__main__":
-     random.seed(1)
-     main()
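As a quick pointer for the deleted benchmark above: it times fontTools' quadratic_to_curves on quadratic splines produced by cu2qu's curve_to_quadratic. A minimal sketch of that round trip outside the timing harness (the 0.5-unit tolerance and the random control points mirror the benchmark's own constants; the package-level import of quadratic_to_curves is assumed to match the relative import used in the file):

import random
from fontTools.cu2qu import curve_to_quadratic
from fontTools.qu2cu import quadratic_to_curves

random.seed(1)
# one random cubic Bezier: four (x, y) control points, as in generate_curves()
cubic = [tuple(float(random.randint(0, 2048)) for _ in range(2)) for _ in range(4)]
quad = curve_to_quadratic(cubic, 0.5)      # cubic -> quadratic spline within 0.5 units
cubics = quadratic_to_curves([quad], 0.5)  # the call the benchmark times
print(len(quad), "quadratic points ->", len(cubics), "cubic segment(s)")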
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-097d3f80.js DELETED
@@ -1,2 +0,0 @@
1
- import{S as $,e as x,s as K,J,K as f,p as q,M as N,n as P,A as j,N as A,O as X,P as ie,k as U,T as fe,Z as Ue,U as he,o as M,Q as F,aj as Me,af as be,Y as Se,X as Ce,u as Q,v as p,y as Y,z as g,R as _e,x as S,a1 as Ee,B as de,a6 as Pe,aB as Re,F as y,h as ye,m as le,j as ze,t as Fe,a9 as Ae,ab as De,ac as Ie,ad as Oe,am as Xe,a7 as Le,ak as T,E as He,ae as Je,q as Ke,r as We}from"./index-1d65707a.js";import{n as ge}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{B as Ge}from"./Button-f155035a.js";import{U as Qe}from"./Upload-9bb55fba.js";import{M as Ye}from"./ModifyUpload-c89cfce3.js";import{B as Be}from"./BlockLabel-66866176.js";import{U as Ze,W as $e}from"./StaticImage.svelte_svelte_type_style_lang-7eb5d885.js";import{I as xe}from"./IconButton-d42f3661.js";import{E as et}from"./Empty-eec13822.js";import{u as tt,S as lt}from"./ShareButton-8cd3d8f6.js";import{D as nt}from"./Download-daff1959.js";import{U as at}from"./UploadText-f599be03.js";import"./Blocks-c9e1499d.js";function it(n){let e,l;return{c(){e=J("svg"),l=J("path"),f(l,"d","M8 3H5a2 2 0 0 0-2 2v3m18 0V5a2 2 0 0 0-2-2h-3m0 18h3a2 2 0 0 0 2-2v-3M3 16v3a2 2 0 0 0 2 2h3"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"width","100%"),f(e,"height","100%"),f(e,"viewBox","0 0 24 24"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"stroke-width","1.5"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(t,a){q(t,e,a),N(e,l)},p:P,i:P,o:P,d(t){t&&j(e)}}}class rt extends ${constructor(e){super(),x(this,e,null,it,K,{})}}function ot(n){let e,l,t;return{c(){e=J("svg"),l=J("rect"),t=J("rect"),f(l,"x","6"),f(l,"y","4"),f(l,"width","4"),f(l,"height","16"),f(t,"x","14"),f(t,"y","4"),f(t,"width","4"),f(t,"height","16"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"width","100%"),f(e,"height","100%"),f(e,"viewBox","0 0 24 24"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"stroke-width","1.5"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(a,i){q(a,e,i),N(e,l),N(e,t)},p:P,i:P,o:P,d(a){a&&j(e)}}}class st extends ${constructor(e){super(),x(this,e,null,ot,K,{})}}function ut(n){let e,l;return{c(){e=J("svg"),l=J("polygon"),f(l,"points","5 3 19 12 5 21 5 3"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"width","100%"),f(e,"height","100%"),f(e,"viewBox","0 0 24 24"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"stroke-width","1.5"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(t,a){q(t,e,a),N(e,l)},p:P,i:P,o:P,d(t){t&&j(e)}}}class ft extends ${constructor(e){super(),x(this,e,null,ut,K,{})}}function ct(n){let e,l,t;return{c(){e=J("svg"),l=J("polygon"),t=J("rect"),f(l,"points","23 7 16 12 23 17 23 7"),f(t,"x","1"),f(t,"y","5"),f(t,"width","15"),f(t,"height","14"),f(t,"rx","2"),f(t,"ry","2"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"width","100%"),f(e,"height","100%"),f(e,"viewBox","0 0 24 24"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"stroke-width","1.5"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round"),f(e,"class","feather feather-video")},m(a,i){q(a,e,i),N(e,l),N(e,t)},p:P,i:P,o:P,d(a){a&&j(e)}}}class me extends ${constructor(e){super(),x(this,e,null,ct,K,{})}}const we=n=>{let e=["B","KB","MB","GB","PB"],l=0;for(;n>1024;)n/=1024,l++;let t=e[l];return n.toFixed(1)+" "+t},_t=()=>!0;function dt(n,{autoplay:e}){async function l(){e&&await n.play()}return n.addEventListener("loadeddata",l),{destroy(){n.removeEventListener("loadeddata",l)}}}const{isNaN:mt}=Pe;function ht(n){let e,l;return e=new 
st({}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function bt(n){let e,l;return e=new ft({}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function gt(n){let e,l;return e=new Ze({}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function wt(n){let e,l,t,a,i,s,b=!1,_,m=!0,r,u,o,d,E,B,V,I,w,D=ce(n[5])+"",L,W,O=ce(n[6])+"",H,k,C,h,te,ee,Z,z,ne,re;function oe(){cancelAnimationFrame(_),l.paused||(_=Re(oe),b=!0),n[15].call(l)}const se=[gt,bt,ht],G=[];function ue(v,R){return v[5]===v[6]?0:v[7]?1:2}return B=ue(n),V=G[B]=se[B](n),Z=new rt({}),{c(){e=A("div"),l=A("video"),t=A("track"),u=X(),o=A("div"),d=A("div"),E=A("span"),V.c(),I=X(),w=A("span"),L=ie(D),W=ie(" / "),H=ie(O),k=X(),C=A("progress"),te=X(),ee=A("div"),U(Z.$$.fragment),f(t,"kind","captions"),fe(t.src,a=n[1])||f(t,"src",a),t.default=!0,fe(l.src,i=n[0])||f(l,"src",i),f(l,"preload","auto"),f(l,"data-testid",s=`${n[4]}-player`),f(l,"class","svelte-1voqrms"),n[6]===void 0&&Ue(()=>n[16].call(l)),he(l,"mirror",n[2]),f(E,"class","icon svelte-1voqrms"),f(w,"class","time svelte-1voqrms"),C.value=h=n[5]/n[6]||0,f(C,"class","svelte-1voqrms"),f(ee,"class","icon svelte-1voqrms"),f(d,"class","inner svelte-1voqrms"),f(o,"class","controls svelte-1voqrms"),f(e,"class","wrap svelte-1voqrms")},m(v,R){q(v,e,R),N(e,l),N(l,t),n[18](l),N(e,u),N(e,o),N(o,d),N(d,E),G[B].m(E,null),N(d,I),N(d,w),N(w,L),N(w,W),N(w,H),N(d,k),N(d,C),N(d,te),N(d,ee),M(Z,ee,null),z=!0,ne||(re=[F(l,"click",n[10]),F(l,"play",n[13]),F(l,"pause",n[14]),F(l,"ended",n[12]),F(l,"timeupdate",oe),F(l,"durationchange",n[16]),F(l,"play",n[17]),F(l,"pause",n[17]),Me(r=dt.call(null,l,{autoplay:n[3]})),F(E,"click",n[10]),F(C,"mousemove",n[9]),F(C,"touchmove",be(n[9])),F(C,"click",Se(be(n[11]))),F(ee,"click",n[19])],ne=!0)},p(v,[R]){(!z||R&2&&!fe(t.src,a=v[1]))&&f(t,"src",a),(!z||R&1&&!fe(l.src,i=v[0]))&&f(l,"src",i),(!z||R&16&&s!==(s=`${v[4]}-player`))&&f(l,"data-testid",s),!b&&R&32&&!mt(v[5])&&(l.currentTime=v[5]),b=!1,R&128&&m!==(m=v[7])&&l[m?"pause":"play"](),r&&Ce(r.update)&&R&8&&r.update.call(null,{autoplay:v[3]}),(!z||R&4)&&he(l,"mirror",v[2]);let ae=B;B=ue(v),B!==ae&&(Q(),p(G[ae],1,1,()=>{G[ae]=null}),Y(),V=G[B],V||(V=G[B]=se[B](v),V.c()),g(V,1),V.m(E,null)),(!z||R&32)&&D!==(D=ce(v[5])+"")&&_e(L,D),(!z||R&64)&&O!==(O=ce(v[6])+"")&&_e(H,O),(!z||R&96&&h!==(h=v[5]/v[6]||0))&&(C.value=h)},i(v){z||(g(V),g(Z.$$.fragment,v),z=!0)},o(v){p(V),p(Z.$$.fragment,v),z=!1},d(v){v&&j(e),n[18](null),G[B].d(),S(Z),ne=!1,Ee(re)}}}function ce(n){if(isNaN(n)||!isFinite(n))return"...";const e=Math.floor(n/60);let l=Math.floor(n%60);return n<10&&(l=`0${l}`),`${e}:${l}`}function kt(n,e,l){let{src:t}=e,{subtitle:a=null}=e,{mirror:i}=e,{autoplay:s}=e,{label:b="test"}=e;const _=de();let m=0,r,u=!0,o;function d(k){if(!r)return;if(k.type==="click"){B(k);return}if(k.type!=="touchmove"&&!(k.buttons&1))return;const C=k.type==="touchmove"?k.touches[0].clientX:k.clientX,{left:h,right:te}=k.currentTarget.getBoundingClientRect();l(5,m=r*(C-h)/(te-h))}async function E(){document.fullscreenElement!=o&&(o.currentTime>0&&!o.paused&&!o.ended&&o.readyState>o.HAVE_CURRENT_DATA?o.pause():await o.play())}function B(k){const{left:C,right:h}=k.currentTarget.getBoundingClientRect();l(5,m=r*(k.clientX-C)/(h-C))}function V(){_("stop"),_("end")}function I(k){y.call(this,n,k)}function 
w(k){y.call(this,n,k)}function D(){m=this.currentTime,l(5,m)}function L(){r=this.duration,l(6,r)}function W(){u=this.paused,l(7,u)}function O(k){ye[k?"unshift":"push"](()=>{o=k,l(8,o)})}const H=()=>o.requestFullscreen();return n.$$set=k=>{"src"in k&&l(0,t=k.src),"subtitle"in k&&l(1,a=k.subtitle),"mirror"in k&&l(2,i=k.mirror),"autoplay"in k&&l(3,s=k.autoplay),"label"in k&&l(4,b=k.label)},[t,a,i,s,b,m,r,u,o,d,E,B,V,I,w,D,L,W,O,H]}class Ne extends ${constructor(e){super(),x(this,e,kt,wt,K,{src:0,subtitle:1,mirror:2,autoplay:3,label:4})}}function pt(n){let e,l,t,a,i,s,b;e=new Ye({}),e.$on("clear",n[11]);const _=[Bt,yt],m=[];function r(u,o){return t==null&&(t=!!_t()),t?0:u[0].size?1:-1}return~(a=r(n))&&(i=m[a]=_[a](n)),{c(){U(e.$$.fragment),l=X(),i&&i.c(),s=le()},m(u,o){M(e,u,o),q(u,l,o),~a&&m[a].m(u,o),q(u,s,o),b=!0},p(u,o){let d=a;a=r(u),a===d?~a&&m[a].p(u,o):(i&&(Q(),p(m[d],1,1,()=>{m[d]=null}),Y()),~a?(i=m[a],i?i.p(u,o):(i=m[a]=_[a](u),i.c()),g(i,1),i.m(s.parentNode,s)):i=null)},i(u){b||(g(e.$$.fragment,u),g(i),b=!0)},o(u){p(e.$$.fragment,u),p(i),b=!1},d(u){u&&(j(l),j(s)),S(e,u),~a&&m[a].d(u)}}}function vt(n){let e,l,t,a;const i=[Vt,Nt],s=[];function b(_,m){return _[2]==="upload"?0:_[2]==="webcam"?1:-1}return~(e=b(n))&&(l=s[e]=i[e](n)),{c(){l&&l.c(),t=le()},m(_,m){~e&&s[e].m(_,m),q(_,t,m),a=!0},p(_,m){let r=e;e=b(_),e===r?~e&&s[e].p(_,m):(l&&(Q(),p(s[r],1,1,()=>{s[r]=null}),Y()),~e?(l=s[e],l?l.p(_,m):(l=s[e]=i[e](_),l.c()),g(l,1),l.m(t.parentNode,t)):l=null)},i(_){a||(g(l),a=!0)},o(_){p(l),a=!1},d(_){_&&j(t),~e&&s[e].d(_)}}}function yt(n){let e,l=n[0].name+"",t,a,i,s=we(n[0].size)+"",b;return{c(){e=A("div"),t=ie(l),a=X(),i=A("div"),b=ie(s),f(e,"class","file-name svelte-a6ruol"),f(i,"class","file-size svelte-a6ruol")},m(_,m){q(_,e,m),N(e,t),q(_,a,m),q(_,i,m),N(i,b)},p(_,m){m&1&&l!==(l=_[0].name+"")&&_e(t,l),m&1&&s!==(s=we(_[0].size)+"")&&_e(b,s)},i:P,o:P,d(_){_&&(j(e),j(a),j(i))}}}function Bt(n){let e=n[0]?.data,l,t,a=ke(n);return{c(){a.c(),l=le()},m(i,s){a.m(i,s),q(i,l,s),t=!0},p(i,s){s&1&&K(e,e=i[0]?.data)?(Q(),p(a,1,1,P),Y(),a=ke(i),a.c(),g(a,1),a.m(l.parentNode,l)):a.p(i,s)},i(i){t||(g(a),t=!0)},o(i){p(a),t=!1},d(i){i&&j(l),a.d(i)}}}function ke(n){let e,l;return e=new Ne({props:{autoplay:n[7],src:n[0].data,subtitle:n[1]?.data,mirror:n[5]&&n[2]==="webcam",label:n[3]}}),e.$on("play",n[18]),e.$on("pause",n[19]),e.$on("stop",n[20]),e.$on("end",n[21]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a&128&&(i.autoplay=t[7]),a&1&&(i.src=t[0].data),a&2&&(i.subtitle=t[1]?.data),a&36&&(i.mirror=t[5]&&t[2]==="webcam"),a&8&&(i.label=t[3]),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Nt(n){let e,l;return e=new $e({props:{mirror_webcam:n[5],include_audio:n[6],mode:"video"}}),e.$on("error",n[14]),e.$on("capture",n[15]),e.$on("start_recording",n[16]),e.$on("stop_recording",n[17]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a&32&&(i.mirror_webcam=t[5]),a&64&&(i.include_audio=t[6]),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Vt(n){let e,l,t;function a(s){n[13](s)}let i={filetype:"video/x-m4v,video/*",$$slots:{default:[Tt]},$$scope:{ctx:n}};return n[8]!==void 0&&(i.dragging=n[8]),e=new Qe({props:i}),ye.push(()=>ze(e,"dragging",a)),e.$on("load",n[10]),{c(){U(e.$$.fragment)},m(s,b){M(e,s,b),t=!0},p(s,b){const 
_={};b&4194304&&(_.$$scope={dirty:b,ctx:s}),!l&&b&256&&(l=!0,_.dragging=s[8],Fe(()=>l=!1)),e.$set(_)},i(s){t||(g(e.$$.fragment,s),t=!0)},o(s){p(e.$$.fragment,s),t=!1},d(s){S(e,s)}}}function Tt(n){let e;const l=n[12].default,t=Ae(l,n,n[22],null);return{c(){t&&t.c()},m(a,i){t&&t.m(a,i),e=!0},p(a,i){t&&t.p&&(!e||i&4194304)&&De(t,l,a,a[22],e?Oe(l,a[22],i,null):Ie(a[22]),null)},i(a){e||(g(t,a),e=!0)},o(a){p(t,a),e=!1},d(a){t&&t.d(a)}}}function qt(n){let e,l,t,a,i,s;e=new Be({props:{show_label:n[4],Icon:me,label:n[3]||"Video"}});const b=[vt,pt],_=[];function m(r,u){return r[0]===null?0:1}return t=m(n),a=_[t]=b[t](n),{c(){U(e.$$.fragment),l=X(),a.c(),i=le()},m(r,u){M(e,r,u),q(r,l,u),_[t].m(r,u),q(r,i,u),s=!0},p(r,[u]){const o={};u&16&&(o.show_label=r[4]),u&8&&(o.label=r[3]||"Video"),e.$set(o);let d=t;t=m(r),t===d?_[t].p(r,u):(Q(),p(_[d],1,1,()=>{_[d]=null}),Y(),a=_[t],a?a.p(r,u):(a=_[t]=b[t](r),a.c()),g(a,1),a.m(i.parentNode,i))},i(r){s||(g(e.$$.fragment,r),g(a),s=!0)},o(r){p(e.$$.fragment,r),p(a),s=!1},d(r){r&&(j(l),j(i)),S(e,r),_[t].d(r)}}}function jt(n,e,l){let{$$slots:t={},$$scope:a}=e,{value:i=null}=e,{subtitle:s=null}=e,{source:b}=e,{label:_=void 0}=e,{show_label:m=!0}=e,{mirror_webcam:r=!1}=e,{include_audio:u}=e,{autoplay:o}=e;const d=de();function E({detail:h}){d("change",h),d("upload",h),l(0,i=h)}function B({detail:h}){l(0,i=null),d("change",h),d("clear")}let V=!1;function I(h){V=h,l(8,V)}function w(h){y.call(this,n,h)}const D=({detail:h})=>d("change",h);function L(h){y.call(this,n,h)}function W(h){y.call(this,n,h)}function O(h){y.call(this,n,h)}function H(h){y.call(this,n,h)}function k(h){y.call(this,n,h)}function C(h){y.call(this,n,h)}return n.$$set=h=>{"value"in h&&l(0,i=h.value),"subtitle"in h&&l(1,s=h.subtitle),"source"in h&&l(2,b=h.source),"label"in h&&l(3,_=h.label),"show_label"in h&&l(4,m=h.show_label),"mirror_webcam"in h&&l(5,r=h.mirror_webcam),"include_audio"in h&&l(6,u=h.include_audio),"autoplay"in h&&l(7,o=h.autoplay),"$$scope"in h&&l(22,a=h.$$scope)},n.$$.update=()=>{n.$$.dirty&256&&d("drag",V)},[i,s,b,_,m,r,u,o,V,d,E,B,t,I,w,D,L,W,O,H,k,C,a]}let Ut=class extends ${constructor(e){super(),x(this,e,jt,qt,K,{value:0,subtitle:1,source:2,label:3,show_label:4,mirror_webcam:5,include_audio:6,autoplay:7})}};function Mt(n){let e=n[0].data,l,t,a,i,s,b,_,m,r=pe(n);i=new xe({props:{Icon:nt,label:"Download"}});let u=n[5]&&ve(n);return{c(){r.c(),l=X(),t=A("div"),a=A("a"),U(i.$$.fragment),_=X(),u&&u.c(),f(a,"href",s=n[0].data),f(a,"target",window.__is_colab__?"_blank":null),f(a,"download",b=n[0].orig_name||n[0].name),f(t,"class","icon-buttons svelte-rvdo70"),f(t,"data-testid","download-div")},m(o,d){r.m(o,d),q(o,l,d),q(o,t,d),N(t,a),M(i,a,null),N(t,_),u&&u.m(t,null),m=!0},p(o,d){d&1&&K(e,e=o[0].data)?(Q(),p(r,1,1,P),Y(),r=pe(o),r.c(),g(r,1),r.m(l.parentNode,l)):r.p(o,d),(!m||d&1&&s!==(s=o[0].data))&&f(a,"href",s),(!m||d&1&&b!==(b=o[0].orig_name||o[0].name))&&f(a,"download",b),o[5]?u?(u.p(o,d),d&32&&g(u,1)):(u=ve(o),u.c(),g(u,1),u.m(t,null)):u&&(Q(),p(u,1,1,()=>{u=null}),Y())},i(o){m||(g(r),g(i.$$.fragment,o),g(u),m=!0)},o(o){p(r),p(i.$$.fragment,o),p(u),m=!1},d(o){o&&(j(l),j(t)),r.d(o),S(i),u&&u.d()}}}function St(n){let e,l;return e=new et({props:{unpadded_box:!0,size:"large",$$slots:{default:[Ct]},$$scope:{ctx:n}}}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a&32768&&(i.$$scope={dirty:a,ctx:t}),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function pe(n){let e,l;return e=new 
Ne({props:{src:n[0].data,subtitle:n[1]?.data,autoplay:n[4],mirror:!1,label:n[2]}}),e.$on("play",n[6]),e.$on("pause",n[7]),e.$on("ended",n[8]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a&1&&(i.src=t[0].data),a&2&&(i.subtitle=t[1]?.data),a&16&&(i.autoplay=t[4]),a&4&&(i.label=t[2]),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function ve(n){let e,l;return e=new lt({props:{value:n[0],formatter:n[9]}}),e.$on("error",n[10]),e.$on("share",n[11]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a&1&&(i.value=t[0]),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Ct(n){let e,l;return e=new me({}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Et(n){let e,l,t,a,i,s;e=new Be({props:{show_label:n[3],Icon:me,label:n[2]||"Video"}});const b=[St,Mt],_=[];function m(r,u){return r[0]===null?0:1}return t=m(n),a=_[t]=b[t](n),{c(){U(e.$$.fragment),l=X(),a.c(),i=le()},m(r,u){M(e,r,u),q(r,l,u),_[t].m(r,u),q(r,i,u),s=!0},p(r,[u]){const o={};u&8&&(o.show_label=r[3]),u&4&&(o.label=r[2]||"Video"),e.$set(o);let d=t;t=m(r),t===d?_[t].p(r,u):(Q(),p(_[d],1,1,()=>{_[d]=null}),Y(),a=_[t],a?a.p(r,u):(a=_[t]=b[t](r),a.c()),g(a,1),a.m(i.parentNode,i))},i(r){s||(g(e.$$.fragment,r),g(a),s=!0)},o(r){p(e.$$.fragment,r),p(a),s=!1},d(r){r&&(j(l),j(i)),S(e,r),_[t].d(r)}}}function Pt(n,e,l){let{value:t=null}=e,{subtitle:a=null}=e,{label:i=void 0}=e,{show_label:s=!0}=e,{autoplay:b}=e,{show_share_button:_=!0}=e,m=null,r=null;const u=de();Xe(async()=>{t!==m&&a!==r&&r!==null&&(m=t,l(0,t=null),await Le(),l(0,t=m)),m=t,r=a});function o(w){y.call(this,n,w)}function d(w){y.call(this,n,w)}function E(w){y.call(this,n,w)}const B=async w=>w?await tt(w.data,"url"):"";function V(w){y.call(this,n,w)}function I(w){y.call(this,n,w)}return n.$$set=w=>{"value"in w&&l(0,t=w.value),"subtitle"in w&&l(1,a=w.subtitle),"label"in w&&l(2,i=w.label),"show_label"in w&&l(3,s=w.show_label),"autoplay"in w&&l(4,b=w.autoplay),"show_share_button"in w&&l(5,_=w.show_share_button)},n.$$.update=()=>{n.$$.dirty&1&&t&&u("change",t)},[t,a,i,s,b,_,o,d,E,B,V,I]}class Rt extends ${constructor(e){super(),x(this,e,Pt,Et,K,{value:0,subtitle:1,label:2,show_label:3,autoplay:4,show_share_button:5})}}function zt(n){let e,l;return e=new Ut({props:{value:n[18],subtitle:n[19],label:n[5],show_label:n[7],source:n[6],mirror_webcam:n[10],include_audio:n[11],autoplay:n[16],$$slots:{default:[At]},$$scope:{ctx:n}}}),e.$on("change",n[21]),e.$on("drag",n[30]),e.$on("error",n[31]),e.$on("clear",n[32]),e.$on("play",n[33]),e.$on("pause",n[34]),e.$on("upload",n[35]),e.$on("stop",n[36]),e.$on("end",n[37]),e.$on("start_recording",n[38]),e.$on("stop_recording",n[39]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a[0]&262144&&(i.value=t[18]),a[0]&524288&&(i.subtitle=t[19]),a[0]&32&&(i.label=t[5]),a[0]&128&&(i.show_label=t[7]),a[0]&64&&(i.source=t[6]),a[0]&1024&&(i.mirror_webcam=t[10]),a[0]&2048&&(i.include_audio=t[11]),a[0]&65536&&(i.autoplay=t[16]),a[1]&1024&&(i.$$scope={dirty:a,ctx:t}),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Ft(n){let e,l;return e=new Rt({props:{value:n[18],subtitle:n[19],label:n[5],show_label:n[7],autoplay:n[16],show_share_button:n[17]}}),e.$on("play",n[25]),e.$on("pause",n[26]),e.$on("stop",n[27]),e.$on("share",n[28]),e.$on("error",n[29]),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const 
i={};a[0]&262144&&(i.value=t[18]),a[0]&524288&&(i.subtitle=t[19]),a[0]&32&&(i.label=t[5]),a[0]&128&&(i.show_label=t[7]),a[0]&65536&&(i.autoplay=t[16]),a[0]&131072&&(i.show_share_button=t[17]),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function At(n){let e,l;return e=new at({props:{type:"video"}}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p:P,i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Dt(n){let e,l,t,a,i,s;const b=[n[1]];let _={};for(let o=0;o<b.length;o+=1)_=He(_,b[o]);e=new Je({props:_});const m=[Ft,zt],r=[];function u(o,d){return o[15]==="static"?0:1}return t=u(n),a=r[t]=m[t](n),{c(){U(e.$$.fragment),l=X(),a.c(),i=le()},m(o,d){M(e,o,d),q(o,l,d),r[t].m(o,d),q(o,i,d),s=!0},p(o,d){const E=d[0]&2?Ke(b,[We(o[1])]):{};e.$set(E);let B=t;t=u(o),t===B?r[t].p(o,d):(Q(),p(r[B],1,1,()=>{r[B]=null}),Y(),a=r[t],a?a.p(o,d):(a=r[t]=m[t](o),a.c()),g(a,1),a.m(i.parentNode,i))},i(o){s||(g(e.$$.fragment,o),g(a),s=!0)},o(o){p(e.$$.fragment,o),p(a),s=!1},d(o){o&&(j(l),j(i)),S(e,o),r[t].d(o)}}}function It(n){let e,l;return e=new Ge({props:{visible:n[4],variant:n[15]==="dynamic"&&n[0]===null&&n[6]==="upload"?"dashed":"solid",border_mode:n[20]?"focus":"base",padding:!1,elem_id:n[2],elem_classes:n[3],height:n[8],width:n[9],container:n[12],scale:n[13],min_width:n[14],allow_overflow:!1,$$slots:{default:[Dt]},$$scope:{ctx:n}}}),{c(){U(e.$$.fragment)},m(t,a){M(e,t,a),l=!0},p(t,a){const i={};a[0]&16&&(i.visible=t[4]),a[0]&32833&&(i.variant=t[15]==="dynamic"&&t[0]===null&&t[6]==="upload"?"dashed":"solid"),a[0]&1048576&&(i.border_mode=t[20]?"focus":"base"),a[0]&4&&(i.elem_id=t[2]),a[0]&8&&(i.elem_classes=t[3]),a[0]&256&&(i.height=t[8]),a[0]&512&&(i.width=t[9]),a[0]&4096&&(i.container=t[12]),a[0]&8192&&(i.scale=t[13]),a[0]&16384&&(i.min_width=t[14]),a[0]&2067682|a[1]&1024&&(i.$$scope={dirty:a,ctx:t}),e.$set(i)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){p(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Ot(n,e,l){let{elem_id:t=""}=e,{elem_classes:a=[]}=e,{visible:i=!0}=e,{value:s=null}=e,b=null,{label:_}=e,{source:m}=e,{root:r}=e,{root_url:u}=e,{show_label:o}=e,{loading_status:d}=e,{height:E}=e,{width:B}=e,{mirror_webcam:V}=e,{include_audio:I}=e,{container:w=!1}=e,{scale:D=null}=e,{min_width:L=void 0}=e,{mode:W}=e,{autoplay:O=!1}=e,{show_share_button:H=!0}=e,k=null,C=null,h=!1;const te=de();function ee({detail:c}){c!=null?l(0,s=[c,null]):l(0,s=null)}function Z(c){y.call(this,n,c)}function z(c){y.call(this,n,c)}function ne(c){y.call(this,n,c)}function re(c){y.call(this,n,c)}function oe(c){y.call(this,n,c)}const se=({detail:c})=>l(20,h=c),G=({detail:c})=>{l(1,d=d||{}),l(1,d.status="error",d),l(1,d.message=c,d)};function ue(c){y.call(this,n,c)}function v(c){y.call(this,n,c)}function R(c){y.call(this,n,c)}function ae(c){y.call(this,n,c)}function Ve(c){y.call(this,n,c)}function Te(c){y.call(this,n,c)}function qe(c){y.call(this,n,c)}function je(c){y.call(this,n,c)}return n.$$set=c=>{"elem_id"in c&&l(2,t=c.elem_id),"elem_classes"in c&&l(3,a=c.elem_classes),"visible"in c&&l(4,i=c.visible),"value"in c&&l(0,s=c.value),"label"in c&&l(5,_=c.label),"source"in c&&l(6,m=c.source),"root"in c&&l(22,r=c.root),"root_url"in c&&l(23,u=c.root_url),"show_label"in c&&l(7,o=c.show_label),"loading_status"in c&&l(1,d=c.loading_status),"height"in c&&l(8,E=c.height),"width"in c&&l(9,B=c.width),"mirror_webcam"in c&&l(10,V=c.mirror_webcam),"include_audio"in c&&l(11,I=c.include_audio),"container"in c&&l(12,w=c.container),"scale"in c&&l(13,D=c.scale),"min_width"in 
c&&l(14,L=c.min_width),"mode"in c&&l(15,W=c.mode),"autoplay"in c&&l(16,O=c.autoplay),"show_share_button"in c&&l(17,H=c.show_share_button)},n.$$.update=()=>{n.$$.dirty[0]&12582913&&(s!=null?(l(18,k=ge(s[0],r,u)),l(19,C=ge(s[1],r,u))):(l(18,k=null),l(19,C=null))),n.$$.dirty[0]&16777217&&JSON.stringify(s)!==JSON.stringify(b)&&(l(24,b=s),te("change"))},[s,d,t,a,i,_,m,o,E,B,V,I,w,D,L,W,O,H,k,C,h,ee,r,u,b,Z,z,ne,re,oe,se,G,ue,v,R,ae,Ve,Te,qe,je]}class Xt extends ${constructor(e){super(),x(this,e,Ot,It,K,{elem_id:2,elem_classes:3,visible:4,value:0,label:5,source:6,root:22,root_url:23,show_label:7,loading_status:1,height:8,width:9,mirror_webcam:10,include_audio:11,container:12,scale:13,min_width:14,mode:15,autoplay:16,show_share_button:17},null,[-1,-1])}get elem_id(){return this.$$.ctx[2]}set elem_id(e){this.$$set({elem_id:e}),T()}get elem_classes(){return this.$$.ctx[3]}set elem_classes(e){this.$$set({elem_classes:e}),T()}get visible(){return this.$$.ctx[4]}set visible(e){this.$$set({visible:e}),T()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),T()}get label(){return this.$$.ctx[5]}set label(e){this.$$set({label:e}),T()}get source(){return this.$$.ctx[6]}set source(e){this.$$set({source:e}),T()}get root(){return this.$$.ctx[22]}set root(e){this.$$set({root:e}),T()}get root_url(){return this.$$.ctx[23]}set root_url(e){this.$$set({root_url:e}),T()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),T()}get loading_status(){return this.$$.ctx[1]}set loading_status(e){this.$$set({loading_status:e}),T()}get height(){return this.$$.ctx[8]}set height(e){this.$$set({height:e}),T()}get width(){return this.$$.ctx[9]}set width(e){this.$$set({width:e}),T()}get mirror_webcam(){return this.$$.ctx[10]}set mirror_webcam(e){this.$$set({mirror_webcam:e}),T()}get include_audio(){return this.$$.ctx[11]}set include_audio(e){this.$$set({include_audio:e}),T()}get container(){return this.$$.ctx[12]}set container(e){this.$$set({container:e}),T()}get scale(){return this.$$.ctx[13]}set scale(e){this.$$set({scale:e}),T()}get min_width(){return this.$$.ctx[14]}set min_width(e){this.$$set({min_width:e}),T()}get mode(){return this.$$.ctx[15]}set mode(e){this.$$set({mode:e}),T()}get autoplay(){return this.$$.ctx[16]}set autoplay(e){this.$$set({autoplay:e}),T()}get show_share_button(){return this.$$.ctx[17]}set show_share_button(e){this.$$set({show_share_button:e}),T()}}const nl=Xt,al=["static","dynamic"],il=n=>({type:{input_payload:"{ name: string; data: string }",response_object:"{ name: string; data: string, is_file: boolean }"},description:{input_payload:"object with file name and base64 data",response_object:"object that includes path to video file. The URL: {ROOT}file={name} contains the data"}});export{nl as Component,il as document,al as modes};
2
- //# sourceMappingURL=index-097d3f80.js.map