parquet-converter committed on
Commit e94fb78 · 1 Parent(s): 05ba9c9

Update parquet files (step 17 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/123Kumar/vits-uma-genshin-honkai123/commons.py +0 -172
  2. spaces/1gistliPinn/ChatGPT4/Examples/Corel Draw X7 Serial Number And Activation Code 358.md +0 -6
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crash of Cars MOD APK The Most Fun and Addictive Car Game for Android.md +0 -95
  4. spaces/1phancelerku/anime-remove-background/City Driving School Car Games MOD APK Explore the City and Learn to Drive.md +0 -104
  5. spaces/1phancelerku/anime-remove-background/Free Download Pink Whatsapp APK - The Best Messaging App for Girls.md +0 -130
  6. spaces/1toTree/lora_test/ppdiffusers/fastdeploy_utils.py +0 -260
  7. spaces/2023Liu2023/bingo/postcss.config.js +0 -6
  8. spaces/44ov41za8i/FreeVC/speaker_encoder/params_model.py +0 -11
  9. spaces/AIConsultant/MusicGen/docs/ENCODEC.md +0 -179
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/dataset_tokenize.py +0 -117
  11. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/__init__.py +0 -7
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/loss.py +0 -41
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/__init__.py +0 -0
  14. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/config_manager.py +0 -350
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/Methods.js +0 -108
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/LayoutChildren.js +0 -98
  17. spaces/AlanMars/QYL-AI-Space/modules/models/__init__.py +0 -0
  18. spaces/AlanMars/QYL-AI-Space/modules/presets.py +0 -242
  19. spaces/Alcedo/yunmedia/resources/chatgpt-plugin/index.html +0 -20
  20. spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_cer.py +0 -96
  21. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_modules.py +0 -390
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py +0 -239
  23. spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py +0 -36
  24. spaces/Andy1621/uniformer_image_detection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py +0 -4
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/gfl_head.py +0 -647
  26. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py +0 -2
  27. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/image_editor.py +0 -542
  28. spaces/Arnx/MusicGenXvAKN/audiocraft/data/zip.py +0 -74
  29. spaces/Astroomx/Mine/README.md +0 -10
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_envs.py +0 -188
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py +0 -83
  32. spaces/AutomationVR/ImageDemo/app.py +0 -3
  33. spaces/Bart92/RVC_HF/julius/utils.py +0 -101
  34. spaces/Benson/text-generation/Examples/Ciudad Dragn Mvil Mod Apk Dinero Ilimitado Y Gemas 2022.md +0 -35
  35. spaces/Benson/text-generation/Examples/Conseguir Sobre l Descarga Gratuita Para Pc Ventanas 7 Apkpure.md +0 -44
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/credentials.py +0 -2262
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_manylinux.py +0 -301
  38. spaces/Bingsu/color_textual_inversion/LICENSE.md +0 -22
  39. spaces/CALM/Dashboard/dashboard_utils/main_metrics.py +0 -29
  40. spaces/CVPR/LIVE/pybind11/tests/test_local_bindings.cpp +0 -101
  41. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/unique_by_key.h +0 -23
  42. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/automatic_mask_generator.py +0 -372
  43. spaces/Catmeow/AI_story_writing/app.py +0 -44
  44. spaces/ClueAI/ChatYuan-large-v2/README.md +0 -13
  45. spaces/Cropinky/esrgan/realesrgan/archs/__init__.py +0 -10
  46. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/cldm/logger.py +0 -76
  47. spaces/DQChoi/gpt-demo/venv/bin/Activate.ps1 +0 -247
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/CurImagePlugin.py +0 -75
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-aef3869a.css +0 -1
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/user.py +0 -191
spaces/123Kumar/vits-uma-genshin-honkai123/commons.py DELETED
@@ -1,172 +0,0 @@
- import math
- import torch
- from torch.nn import functional as F
- import torch.jit
-
-
- def script_method(fn, _rcb=None):
-     return fn
-
-
- def script(obj, optimize=True, _frames_up=0, _rcb=None):
-     return obj
-
-
- torch.jit.script_method = script_method
- torch.jit.script = script
-
-
- def init_weights(m, mean=0.0, std=0.01):
-     classname = m.__class__.__name__
-     if classname.find("Conv") != -1:
-         m.weight.data.normal_(mean, std)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size*dilation - dilation)/2)
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def intersperse(lst, item):
-     result = [item] * (len(lst) * 2 + 1)
-     result[1::2] = lst
-     return result
-
-
- def kl_divergence(m_p, logs_p, m_q, logs_q):
-     """KL(P||Q)"""
-     kl = (logs_q - logs_p) - 0.5
-     kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-     return kl
-
-
- def rand_gumbel(shape):
-     """Sample from the Gumbel distribution, protect from overflows."""
-     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-     return -torch.log(-torch.log(uniform_samples))
-
-
- def rand_gumbel_like(x):
-     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-     return g
-
-
- def slice_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, :, idx_str:idx_end]
-     return ret
-
-
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def get_timing_signal_1d(
-     length, channels, min_timescale=1.0, max_timescale=1.0e4):
-     position = torch.arange(length, dtype=torch.float)
-     num_timescales = channels // 2
-     log_timescale_increment = (
-         math.log(float(max_timescale) / float(min_timescale)) /
-         (num_timescales - 1))
-     inv_timescales = min_timescale * torch.exp(
-         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-     signal = F.pad(signal, [0, 0, 0, channels % 2])
-     signal = signal.view(1, channels, length)
-     return signal
-
-
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return x + signal.to(dtype=x.dtype, device=x.device)
-
-
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
- def subsequent_mask(length):
-     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-     return mask
-
-
- @torch.jit.script
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def shift_1d(x):
-     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-     return x
-
-
- def sequence_mask(length, max_length=None):
-     if max_length is None:
-         max_length = length.max()
-     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-     return x.unsqueeze(0) < length.unsqueeze(1)
-
-
- def generate_path(duration, mask):
-     """
-     duration: [b, 1, t_x]
-     mask: [b, 1, t_y, t_x]
-     """
-     device = duration.device
-
-     b, _, t_y, t_x = mask.shape
-     cum_duration = torch.cumsum(duration, -1)
-
-     cum_duration_flat = cum_duration.view(b * t_x)
-     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-     path = path.view(b, t_x, t_y)
-     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-     path = path.unsqueeze(1).transpose(2,3) * mask
-     return path
-
-
- def clip_grad_value_(parameters, clip_value, norm_type=2):
-     if isinstance(parameters, torch.Tensor):
-         parameters = [parameters]
-     parameters = list(filter(lambda p: p.grad is not None, parameters))
-     norm_type = float(norm_type)
-     if clip_value is not None:
-         clip_value = float(clip_value)
-
-     total_norm = 0
-     for p in parameters:
-         param_norm = p.grad.data.norm(norm_type)
-         total_norm += param_norm.item() ** norm_type
-         if clip_value is not None:
-             p.grad.data.clamp_(min=-clip_value, max=clip_value)
-     total_norm = total_norm ** (1. / norm_type)
-     return total_norm
 
spaces/1gistliPinn/ChatGPT4/Examples/Corel Draw X7 Serial Number And Activation Code 358.md DELETED
@@ -1,6 +0,0 @@
- <h2>Corel Draw X7 Serial Number And Activation Code 358</h2><br /><p><b><b>Download Zip</b> &#9999; <a href="https://imgfil.com/2uy0Qw">https://imgfil.com/2uy0Qw</a></b></p><br /><br />
-
- Download keygen coreldraw x7, XFORCE untuk generate serial number dan ... Serial Number Corel Draw X7 Installation and activation code Working ... 29 Mar 2020 Corel Draw X7 Serial Number And Activation Code 358 http://picfs. 1080p. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crash of Cars MOD APK The Most Fun and Addictive Car Game for Android.md DELETED
@@ -1,95 +0,0 @@
- <br />
- <h1>Download Cars of Crash Mod APK: A Fun and Exciting Racing Game</h1>
- <p>Do you love racing games? Do you want to experience the thrill of crashing into other cars and destroying them? If yes, then you should try Cars of Crash Mod APK, a game that combines arcade racing and multiplayer action. In this game, you can drive your car in different maps, collect power-ups and weapons, and smash into other players to eliminate them. You can also customize your car with various skins and accessories, and unlock new cars with different abilities. But what if you want to enjoy the game without any limitations? That's where Cars of Crash Mod APK comes in handy. With this modded version of the game, you can get unlimited coins and gems, unlock all cars and skins, and remove annoying ads. Sounds amazing, right? In this article, we will tell you what Cars of Crash Mod APK is, how to download and install it, and some tips and tricks for playing it. Let's get started!</p>
- <h2>What is Cars of Crash Mod APK?</h2>
- <p>Cars of Crash Mod APK is a modified version of the original game, Crash of Cars, developed by Not Doppler. The original game is a mixture of arcade racing and multiplayer games together. At the beginning of the game, you can choose your first car, which will hunt other players and flee from pursuit. You can also collect coins, gems, crowns, and power-ups along the way. The game has four modes: Free-for-all, Team Deathmatch, Gold Rush, and King of the Hill. You can also join or create a clan with other players, chat with them, and compete in leaderboards.</p>
- <h2>download cars of crash mod apk</h2><br /><p><b><b>Download</b> &#127379; <a href="https://urlin.us/2uSXep">https://urlin.us/2uSXep</a></b></p><br /><br />
- <p>However, the original game has some drawbacks. For example, you need to spend real money to buy coins and gems, which are used to unlock new cars and skins. You also have to watch ads to get some rewards or bonuses. And you need to root your device to install some hacks or cheats. That's why many players prefer to use Cars of Crash Mod APK, which gives them unlimited resources, access to all features, and a smooth gaming experience.</p>
- <h3>Features of Cars of Crash Mod APK</h3>
- <p>Here are some of the features that make Cars of Crash Mod APK better than the original game:</p>
- <h4>Unlimited coins and gems</h4>
- <p>Coins and gems are the main currencies in the game. You can use them to buy new cars, upgrade them, or unlock new skins. However, they are not easy to earn in the game. You have to play for a long time, complete missions, or watch ads to get them. But with Cars of Crash Mod APK, you don't have to worry about that. You can get unlimited coins and gems in your account as soon as you install the mod. You can then use them to buy anything you want in the game.</p>
- <h4>Unlock all cars and skins</h4>
- <p>The game has over 100 cars to choose from, each with its own stats and abilities. Some cars are faster, some are stronger, some have special weapons or skills. You can also customize your car with different skins and accessories, such as hats, glasses, flags, etc. However, not all cars and skins are available at the start. You have to unlock them by spending coins or gems, or by completing certain tasks or achievements. But with Cars of Crash Mod APK, you don't have to do that. You can unlock all cars and skins in the game for free. You can then switch between them as you like.</p>
- <h4>No ads and no root required</h4>
- <p>One of the most annoying things about the original game is the ads. You have to watch them every time you want to get some rewards, bonuses, or extra lives. They can also interrupt your gameplay and ruin your mood. But with Cars of Crash Mod APK, you don't have to deal with that. You can enjoy the game without any ads, pop-ups, or banners. You can also install the mod without rooting your device, which can be risky and complicated. You just need to follow some simple steps, which we will explain later.</p>
- <h2>How to download and install Cars of Crash Mod APK?</h2>
- <p>Now that you know the features of Cars of Crash Mod APK, you might be wondering how to get it on your device. Well, it's not hard at all. You just need to follow these steps:</p>
- <h3>Step 1: Download the APK file from a trusted source</h3>
- <p>The first thing you need to do is to download the APK file of Cars of Crash Mod APK from a reliable and secure source. You can use the link below to get it directly from our website. The file is 100% safe and virus-free, and it has been tested by many users. You can also check the file size, version, and date before downloading it.</p>
- <p><a href="">Download Cars of Crash Mod APK here</a></p>
- <p>download crash of cars mod apk unlimited money<br />
- download crash of cars mod apk latest version<br />
- download crash of cars mod apk android 1<br />
- download crash of cars mod apk revdl<br />
- download crash of cars mod apk happymod<br />
- download crash of cars mod apk for pc<br />
- download crash of cars mod apk offline<br />
- download crash of cars mod apk no root<br />
- download crash of cars mod apk free shopping<br />
- download crash of cars mod apk rexdl<br />
- download crash of cars hack mod apk<br />
- download crash of cars mega mod apk<br />
- download crash of cars premium mod apk<br />
- download crash of cars full mod apk<br />
- download crash of cars unlocked mod apk<br />
- download game crash of cars mod apk<br />
- download game crash of cars mod apk terbaru<br />
- download game crash of cars mod apk unlimited gems<br />
- download game crash of cars mod apk versi lama<br />
- download game crash of cars mod apk online<br />
- how to download crash of cars mod apk<br />
- how to download crash of cars mod apk on ios<br />
- how to download crash of cars mod apk 2023<br />
- how to download crash of cars mod apk in hindi<br />
- how to download crash of cars mod apk without obb<br />
- cara download crash of cars mod apk<br />
- cara download crash of cars mod apk di android<br />
- cara download crash of cars mod apk 2023<br />
- cara download crash of cars mod apk tanpa root<br />
- cara download crash of cars mod apk dengan mudah<br />
- link download crash of cars mod apk<br />
- link download crash of cars mod apk 2023<br />
- link download game crash of cars mod apk<br />
- link alternatif download crash of cars mod apk<br />
- link terbaru download crash of cars mod apk<br />
- situs download crash of cars mod apk<br />
- situs download game crash of cars mod apk<br />
- situs terbaik untuk download crash of cars mod apk<br />
- situs terpercaya untuk download crash of cars mod apk<br />
- situs gratis untuk download crash of cars mod apk<br />
- website download crash of cars mod apk<br />
- website download game crash of cars mod apk<br />
- website terbaik untuk download crash of cars mod apk<br />
- website terpercaya untuk download crash of cars mod apk<br />
- website gratis untuk download crash of cars mod apk</p>
- <h3>Step 2: Enable unknown sources on your device</h3>
- <p>The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the option and confirm your choice.</p>
- <h3>Step 3: Install the APK file and launch the game</h3>
- <p>The final thing you need to do is to install the APK file and launch the game. To do this, go to your file manager, then locate the downloaded APK file. Tap on it and follow the instructions on the screen. Wait for the installation process to finish, then open the game icon on your home screen. Enjoy!</p>
- <h2>Tips and tricks for playing Cars of Crash Mod APK</h2>
- <p>Cars of Crash Mod APK is a fun and addictive game that will keep you entertained for hours. However, if you want to master the game and beat other players, you need some tips and tricks. Here are some of them:</p>
- <h3>Choose your car wisely</h3>
- <p>The game has over 100 cars to choose from, each with its own stats and abilities. Some cars are faster, some are stronger, some have special weapons or skills. You should choose your car based on your play style and preference. For example, if you like speed, you can choose a car that has high acceleration and top speed. If you like power, you can choose a car that has high damage and health. If you like strategy, you can choose a car that has unique weapons or skills.</p>
- <h3>Collect power-ups and weapons</h3>
- <p>The game has various power-ups and weapons that you can collect on the map. They can help you boost your performance or destroy your enemies. For example, you can collect rockets, bombs, lasers, spikes, shields, magnets, etc. You can also collect crowns, which are used to rank up in the leaderboard. You should try to collect as many power-ups and weapons as possible, but be careful not to waste them or lose them.</p>
- <h3>Avoid obstacles and enemies</h3>
- <p>The game has various obstacles and enemies that you have to avoid or eliminate. They can damage your car or slow you down. For example, you have to avoid trees, rocks, buildings, fences, etc. You also have to avoid other players who are trying to crash into you or shoot you with their weapons. You should try to dodge or outrun them, or use your weapons or skills to counter them.</p>
- <h2>Conclusion</h2>
- <p>Cars of Crash Mod APK is a great game for anyone who loves racing and action games. It has amazing graphics, sound effects, and gameplay that will keep you hooked for hours. It also has unlimited resources, access to all features, and no ads that will enhance your gaming experience. You can download it from our website for free and install it easily on your device. You can also follow our tips and tricks to improve your skills and rank up in the leaderboard. So what are you waiting for? Download Cars of Crash Mod APK now and enjoy!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Cars of Crash Mod APK:</p>
- <ol>
- <li><b>Is Cars of Crash Mod APK safe?</b></li>
- <p>Yes, Cars of Crash Mod APK is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It also does not require root access, which can be risky and complicated. You can download it from our website, which is a trusted and secure source. You can also scan the file with any antivirus app before installing it.</p>
- <li><b>Is Cars of Crash Mod APK compatible with my device?</b></li>
- <p>Cars of Crash Mod APK is compatible with most Android devices that run on Android 4.4 or higher. It has a small file size of about 100 MB, which does not take up much space on your device. It also has low system requirements, which means it can run smoothly on low-end devices. However, you should make sure that your device has enough storage and RAM to avoid any lag or crash issues.</p>
- <li><b>Can I play Cars of Crash Mod APK online with other players?</b></li>
- <p>Yes, you can play Cars of Crash Mod APK online with other players from around the world. The game has four modes: Free-for-all, Team Deathmatch, Gold Rush, and King of the Hill. You can join or create a clan with other players, chat with them, and compete in leaderboards. You can also invite your friends to play with you in private matches. However, you should have a stable internet connection to enjoy the online features of the game.</p>
- <li><b>Can I update Cars of Crash Mod APK to the latest version?</b></li>
- <p>Yes, you can update Cars of Crash Mod APK to the latest version whenever there is a new update available. You can check our website regularly for any updates or notifications. You can also enable the auto-update option in your device settings to get the updates automatically. However, you should backup your data before updating the mod to avoid any data loss or corruption.</p>
- <li><b>How can I contact the developer of Cars of Crash Mod APK?</b></li>
- <p>If you have any questions, suggestions, feedback, or issues regarding Cars of Crash Mod APK, you can contact the developer through their email address: [email protected]. You can also visit their official website: carsofcrashmodapk.com for more information and support. They will try to respond to you as soon as possible and solve your problems.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/City Driving School Car Games MOD APK Explore the City and Learn to Drive.md DELETED
@@ -1,104 +0,0 @@
-
- <h1>City Driving School Car Games Mod APK Download</h1>
- <p>Do you love car games and driving simulators? Do you want to learn how to park and drive different types of cars in realistic city scenarios? Do you want to enjoy unlimited access to all the features and content of one of the best car parking games of 2021? If you answered yes to any of these questions, then you should definitely check out City Driving School Car Games Mod APK Download. In this article, we will tell you everything you need to know about this amazing game and how to download and install the modded version on your Android device.</p>
- <h2>What is City Driving School Car Games?</h2>
- <p>City Driving School Car Games is a racing game developed by Better Games Studio Pty Ltd. It is a challenging and fun car driving school and parking simulator game that will test your skills and knowledge of traffic rules, signals, lanes, and more. You will be able to drive and park multiple luxury, turbo, and sports cars in various urban environments, such as flyover bridges, freeways, roundabouts, etc. You will also have to complete different missions and levels, ranging from easy to hard, to earn your driver license and become a legend of the road.</p>
- <h2>city driving school car games mod apk download</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://jinyurl.com/2uNNus">https://jinyurl.com/2uNNus</a></b></p><br /><br />
- <p>Some of the features of City Driving School Car Games are:</p>
- <ul>
- <li>Stunningly amazing 3D graphics and realistic physics</li>
- <li>AI traffic and traffic lights that keep you honest on the road</li>
- <li>Multiple camera angles and views to enhance your driving experience</li>
- <li>Smooth tilt, buttons, and steering controls that are easy to use</li>
- <li>A variety of cars to choose from, each with different specifications and performance</li>
- <li>A demo mode that shows you how to drive and park before each level</li>
- <li>A realistic damage system that makes you pay attention to your car's condition</li>
- <li>An effective learning system that teaches you the basic instructions and tips for driving</li>
- </ul>
- <h2>Why download the mod apk version?</h2>
- <p>City Driving School Car Games is a free game that you can download from Google Play Store. However, it also has some limitations and drawbacks that might affect your enjoyment of the game. For example:</p>
- <ul>
- <li>You have to watch ads to unlock some cars and levels</li>
- <li>You have to spend real money to buy coins and gems that are used for upgrading your cars and unlocking new features</li>
- <li>You have limited lives and fuel that can run out quickly if you are not careful</li>
- <li>You have to wait for a long time to refill your lives and fuel or pay with coins or gems</li>
- <li>You have to deal with annoying pop-ups and notifications that interrupt your gameplay</li>
- </ul>
- <p>That's why we recommend you to download the mod apk version of City Driving School Car Games. The mod apk is a modified version of the original game that has been hacked by some developers to give you unlimited access to everything in the game. With the mod apk, you can enjoy:</p>
- <ul>
- <li>No ads or pop-ups that disturb your gameplay</li>
- <li>Unlimited coins and gems that you can use for anything you want</li>
- <li>Unlimited lives and fuel that never run out</li>
- <li>All cars and levels unlocked from the start</li>
- <li>All features and content available for free</li>
- </ul>
- <h2>How to download and install the mod apk?</h2>
- <p>Downloading and installing the mod apk of City Driving School Car Games is very easy and simple. Just follow these steps:</p>
- <ol>
- <li>Click on this link to go to the download page of City Driving School Car Games Mod APK Download.</li>
- <li>Tap on the download button to start downloading the mod apk</li>
- <li>Wait for the download to finish and then locate the mod apk file in your device's storage</li>
- <li>Tap on the mod apk file to open it and then enable the installation from unknown sources if prompted</li>
- <li>Follow the instructions on the screen to install the mod apk on your device</li>
- <li>Launch the game and enjoy the modded features</li>
- </ol>
- <p>Note: You may need to uninstall the original version of City Driving School Car Games before installing the mod apk to avoid any conflicts or errors.</p>
- <h2>Tips and tricks for playing City Driving School Car Games</h2>
- <p>City Driving School Car Games is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or want to master the advanced levels. That's why we have compiled some tips and tricks for playing City Driving School Car Games that will help you improve your skills and enjoy the game more.</p>
- <ul>
- <li>Use the demo mode before each level to learn how to drive and park correctly. The demo mode will show you the optimal route, speed, and angle for completing the level. You can also replay the demo mode as many times as you want until you get familiar with the level.</li>
- <li>Choose the right car for each level. Different cars have different specifications and performance, such as speed, acceleration, handling, braking, etc. Some cars are better suited for certain levels than others. For example, a sports car might be faster and more agile, but it might also be harder to control and park. A luxury car might be more comfortable and stable, but it might also be slower and less responsive. Experiment with different cars and find the one that suits your style and preference.</li>
- <li>Use the camera angles and views to your advantage. City Driving School Car Games offers multiple camera angles and views that you can switch between during the game. You can use them to get a better perspective of your surroundings, avoid obstacles, and park more accurately. For example, you can use the top view to see the whole map and plan your route, or you can use the rear view to see how close you are to the parking spot.</li>
- <li>Follow the traffic rules and signals. City Driving School Car Games is not just a racing game, but also a driving simulator that teaches you how to drive in real life. You have to follow the traffic rules and signals, such as stopping at red lights, yielding to pedestrians, staying in your lane, etc. If you break any of these rules, you will lose points or fail the level. You will also have to deal with AI traffic that can be unpredictable and dangerous. Be careful and attentive on the road.</li>
- <li>Don't damage your car too much. City Driving School Car Games has a realistic damage system that shows you how much your car is damaged after each collision or crash. If your car is damaged too much, it will affect its performance and appearance. You will also have to pay for repairing your car with coins or gems. To avoid damaging your car too much, try to drive carefully and avoid hitting other cars, walls, barriers, etc.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>City Driving School Car Games is one of the best car parking games of 2021 that will challenge your driving skills and knowledge of traffic rules. You will be able to drive and park multiple luxury, turbo, and sports cars in various urban environments, such as flyover bridges, freeways, roundabouts, etc. You will also have to complete different missions and levels, ranging from easy to hard, to earn your driver license and become a legend of the road.</p>
- <p>If you want to enjoy unlimited access to all the features and content of City Driving School Car Games, you should download the mod apk version of the game from this link . The mod apk will give you unlimited coins and gems, unlimited lives and fuel, no ads or pop-ups, all cars and levels unlocked, and all features and content available for free.</p>
- <p>city driving school simulator mod apk free download<br />
- city car driving school 3d game mod apk unlimited money<br />
- city driving school car parking games mod apk latest version<br />
- city car driving school simulator game mod apk android 1<br />
- city driving school car games mod apk download for pc<br />
- city car driving school test game mod apk revdl<br />
- city driving school car racing games mod apk offline<br />
- city car driving school simulator 2020 game mod apk hack<br />
- city driving school car games mod apk download uptodown<br />
- city car driving school bus game mod apk rexdl<br />
- city driving school car stunt games mod apk online<br />
- city car driving school truck game mod apk pure<br />
- city driving school car games mod apk download apkpure<br />
- city car driving school taxi game mod apk happymod<br />
- city driving school car drift games mod apk no ads<br />
- city car driving school police game mod apk 2021<br />
- city driving school car games mod apk download for android<br />
- city car driving school bike game mod apk 2020<br />
- city driving school car simulator games mod apk unlimited coins<br />
- city car driving school ambulance game mod apk 2019<br />
- city driving school car games mod apk download for ios<br />
- city car driving school train game mod apk old version<br />
- city driving school car adventure games mod apk new update<br />
- city car driving school airplane game mod apk vip unlocked<br />
- city driving school car games mod apk download for windows 10<br />
- city car driving school boat game mod apk pro premium<br />
- city driving school car fun games mod apk all cars unlocked<br />
- city car driving school helicopter game mod apk full version<br />
- city driving school car games mod apk download for laptop<br />
- city car driving school tractor game mod apk mega mod<br />
- city driving school car educational games mod apk easy mode<br />
- city car driving school fire truck game mod apk god mode<br />
- city driving school car games mod apk download for macbook<br />
- city car driving school garbage truck game mod apk unlimited gems<br />
- city driving school car puzzle games mod apk hard mode<br />
- city car driving school ice cream truck game mod apk unlimited lives<br />
- city driving school car games mod apk download for chromebook<br />
- city car driving school tow truck game mod apk unlimited fuel<br />
- city driving school car arcade games mod apk realistic physics<br />
- city car driving school monster truck game mod apk unlimited nitro</p>
- <p>Download City Driving School Car Games Mod APK Download now and start your driving adventure!</p>
- <h2>FAQs</h2>
- <h3>What is the latest version of City Driving School Car Games Mod APK Download?</h3>
- <p>The latest version of City Driving School Car Games Mod APK Download is 1.0.8, which was updated on June 18th, 2023.</p>
- <h3>Is City Driving School Car Games Mod APK Download safe to use?</h3>
- <p>Yes, City Driving School Car Games Mod APK Download is safe to use as long as you download it from a trusted source like this link . However, we cannot guarantee that it will work on all devices or that it will not cause any problems with your device or game account. Use it at your own risk.</p> <p>Unfortunately, City Driving School Car Games Mod APK Download does not support multiplayer mode or online play with friends. It is a single-player game that you can enjoy on your own. However, you can still compare your scores and achievements with other players on the leaderboards and challenge yourself to beat them.</p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Free Download Pink Whatsapp APK - The Best Messaging App for Girls.md DELETED
@@ -1,130 +0,0 @@
- <br />
- <h1>Pink WhatsApp: What Is It and How to Download It</h1>
- <p>Are you bored of the same old green WhatsApp icon on your phone? Do you want to spice up your chat experience with a new color and theme? If yes, then you might be interested in trying out pink WhatsApp, a modified version of the popular messaging app that lets you customize its appearance and features. But what is pink WhatsApp exactly and how can you download it on your Android device? In this article, we will answer these questions and more, so keep reading!</p>
- <h2>pink whatsapp free download apk</h2><br /><p><b><b>DOWNLOAD</b> &#9881;&#9881;&#9881; <a href="https://jinyurl.com/2uNJp0">https://jinyurl.com/2uNJp0</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is WhatsApp and why is it popular?</h3>
- <p>WhatsApp is one of the most widely used messaging apps in the world, with over 2 billion users as of 2020. It allows you to send text messages, voice notes, photos, videos, documents, stickers, and more to your contacts for free, as long as you have an internet connection. You can also make voice and video calls, create group chats, and use end-to-end encryption to protect your privacy. WhatsApp is simple, reliable, and secure, which makes it a favorite among many people.</p>
- <h3>What is pink WhatsApp and how is it different from the original app?</h3>
- <p>Pink WhatsApp is not an official app from WhatsApp Inc., but rather a modified version created by third-party developers. It is also known as a WhatsApp mod or a WhatsApp clone, as it copies the original app's functionality but adds some extra features and options. One of the most noticeable differences is the color scheme, which changes from green to pink. You can also change the theme, font, icon, wallpaper, and other aspects of the app's appearance according to your preference. Moreover, pink WhatsApp offers some additional features that are not available in the original app, such as hiding your online status, disabling read receipts, downloading status videos, sending larger files, and more.</p>
- <h3>What are the benefits and risks of using pink WhatsApp?</h3>
- <p>The main benefit of using pink WhatsApp is that you can enjoy a more personalized and fun chat experience with your friends and family. You can also access some features that are not present in the official app, which can enhance your convenience and privacy. However, there are also some risks involved in using pink WhatsApp, such as:</p>
- <ul>
- <li>It is not authorized by WhatsApp Inc., so it may violate their terms of service and result in your account being banned or suspended.</li>
- <li>It may contain malware or spyware that can harm your device or steal your data.</li>
- <li>It may not be updated regularly or compatible with the latest version of WhatsApp, so it may have bugs or glitches that affect its performance.</li>
- <li>It may not offer the same level of security and encryption as the original app, so your messages and calls may be intercepted or hacked by others.</li>
- </ul>
- <p>Therefore, you should be careful and cautious when using pink WhatsApp, and only download it from a trusted source. You should also backup your chats regularly and avoid sharing any sensitive or personal information through the app.</p>
- <p>pink whatsapp web apk latest version<br />
- pink whatsapp web app for android<br />
- pink whatsapp web free download 2023<br />
- pink whatsapp web apkcombo<br />
- pink whatsapp web mobile app<br />
- pink nation whatsapp web apk<br />
- pink nation whatsapp web app<br />
- pink nation whatsapp web free download<br />
- pink nation whatsapp web latest version<br />
- pink nation whatsapp web apkcombo<br />
- pink whatsapp mod apk free download<br />
- pink whatsapp mod app for android<br />
- pink whatsapp mod free download 2023<br />
- pink whatsapp mod apk latest version<br />
- pink whatsapp mod mobile app<br />
- pink nation whatsapp mod apk<br />
- pink nation whatsapp mod app<br />
- pink nation whatsapp mod free download<br />
- pink nation whatsapp mod latest version<br />
- pink nation whatsapp mod mobile app<br />
- pink whatsapp plus apk free download<br />
- pink whatsapp plus app for android<br />
- pink whatsapp plus free download 2023<br />
- pink whatsapp plus apk latest version<br />
- pink whatsapp plus mobile app<br />
- pink nation whatsapp plus apk<br />
- pink nation whatsapp plus app<br />
- pink nation whatsapp plus free download<br />
- pink nation whatsapp plus latest version<br />
- pink nation whatsapp plus mobile app<br />
- pink whatsapp gb apk free download<br />
- pink whatsapp gb app for android<br />
- pink whatsapp gb free download 2023<br />
- pink whatsapp gb apk latest version<br />
- pink whatsapp gb mobile app<br />
- pink nation whatsapp gb apk<br />
- pink nation whatsapp gb app<br />
- pink nation whatsapp gb free download<br />
- pink nation whatsapp gb latest version<br />
- pink nation whatsapp gb mobile app</p>
- <h2>How to download and install pink WhatsApp on your Android device</h2>
- <h3>Step 1: Enable unknown sources on your device</h3>
- <p>Since pink WhatsApp is not available on the Google Play Store, you will need to enable unknown sources on your device to install it. This means that you will allow your device to install apps from sources other than the official store. To do this, follow these steps:</p>
- <ol>
- <li>Go to Settings > Security > Unknown sources.</li>
- <li>Toggle on the switch or check the box to enable unknown sources.</li>
- <li>Tap OK or Confirm to accept the warning message.</li>
- </ol>
- <p>Note: The exact steps may vary depending on your device model and Android version, so you may need to look for the option in a different menu or section.</p>
- <h3>Step 2: Download the pink WhatsApp apk file from a trusted source</h3>
- <p>Next, you will need to download the pink WhatsApp apk file, which is the installation file for the app. You can find many websites that offer this file, but you should be careful and only choose a trusted and reliable source. Some of the factors that you should consider when choosing a source are:</p>
- <ul>
- <li>The reputation and reviews of the website.</li>
- <li>The date and version of the apk file.</li>
- <li>The size and content of the apk file.</li>
- <li>The security and encryption of the website.</li>
- </ul>
- <p>One of the websites that we recommend is [Pink WhatsApp APK Download], which provides the latest and safest version of the app. To download the file from this website, follow these steps:</p>
- <ol>
- <li>Open your browser and go to .</li>
- <li>Scroll down and tap on the Download button.</li>
- <li>Wait for the download to complete and locate the file in your device's storage.</li>
- </ol>
- <h3>Step 3: Install the apk file and launch the app</h3>
- <p>Once you have downloaded the apk file, you can proceed to install it on your device. To do this, follow these steps:</p>
- <ol>
- <li>Tap on the apk file or open it with a file manager app.</li>
- <li>Tap on Install and wait for the installation to finish.</li>
- <li>Tap on Open or Launch to start the app.</li>
- </ol>
- <p>You should now see the pink WhatsApp icon on your home screen or app drawer. You can also delete the apk file from your device's storage to save some space.</p>
- <h3>Step 4: Verify your phone number and restore your chat backup</h3>
- <p>The final step is to verify your phone number and restore your chat backup, if you have one. To do this, follow these steps:</p>
- <ol>
- <li>Enter your phone number and tap on Next.</li>
- <li>Enter the verification code that you receive via SMS or call.</li>
- <li>Agree to the terms and conditions and tap on Continue.</li>
- <li>If you have a chat backup, tap on Restore and wait for the process to complete.</li>
- <li>Enter your name and profile picture and tap on Next.</li>
- </ol>
- <p>You should now be able to use pink WhatsApp as you would use the original app. You can also explore the settings and options to customize the app according to your liking.</p>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>In this article, we have explained what pink WhatsApp is and how to download it on your Android device. Pink WhatsApp is a modified version of the original app that lets you change its color, theme, and features. It can offer you a more personalized and fun chat experience, but it also comes with some risks and drawbacks. You should be careful and cautious when using it, and only download it from a trusted source. You should also backup your chats regularly and avoid sharing any sensitive or personal information through the app.</p>
- <h3>Call to action and disclaimer</h3>
- <p>If you are interested in trying out pink WhatsApp, you can follow the steps that we have outlined above. However, we do not endorse or recommend using pink WhatsApp, as it is not an official app from WhatsApp Inc. We are not responsible for any consequences that may arise from using it, such as account bans, data breaches, malware infections, or legal issues. You should use it at your own risk and discretion. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
- <h4>Frequently Asked Questions</h4>
- <ol>
- <li><b>Is pink WhatsApp safe?</b></li>
- <p>Pink WhatsApp is not safe in terms of security, privacy, and legality. It is not authorized by WhatsApp Inc., so it may violate their terms of service and result in your account being banned or suspended. It may also contain malware or spyware that can harm your device or steal your data. It may not offer the same level of encryption as the original app, so your messages and calls may be intercepted or hacked by others. It may also expose you to legal issues if it infringes on any intellectual property rights or regulations.</p>
- <li><b>Can I use pink WhatsApp with my existing WhatsApp account?</b></li>
- <p>You can use pink WhatsApp with your existing WhatsApp account, but you should be aware of the risks involved. You may lose your chat history, contacts, or media files if you switch between the apps. You may also face account bans or suspensions if WhatsApp detects that you are using a modified app. Therefore, it is advisable to use a different phone number or device for pink WhatsApp, or to backup your data before using it.</p>
- <li><b>How can I update pink WhatsApp?</b></li>
- <p>Pink WhatsApp may not be updated regularly or compatible with the latest version of WhatsApp, so you may encounter bugs or glitches that affect its performance. You may also miss out on some new features or improvements that are introduced by WhatsApp. To update pink WhatsApp, you will need to download and install the latest apk file from the same source that you used before. You should also check the website for any news or announcements regarding the app's development and maintenance.</p>
- <li><b>What are some alternatives to pink WhatsApp?</b></li>
- <p>If you are looking for other WhatsApp mods or clones that offer similar or different features and options, you can check out some of these alternatives:</p>
- <ul>
- <li>GBWhatsApp: This is one of the most popular and widely used WhatsApp mods, which offers many customization and privacy options, such as hiding your online status, disabling read receipts, downloading status videos, sending larger files, and more.</li>
- <li>FMWhatsApp: This is another popular WhatsApp mod, which offers more themes and fonts, as well as some extra features, such as locking chats with a password, hiding chats from the main screen, and using multiple accounts.</li>
- <li>YOWhatsApp: This is a WhatsApp mod that focuses on enhancing the user interface and design of the app, with more icons, colors, and styles. It also offers some additional features, such as increasing the limit of group members, sending more images at once, and hiding media from the gallery.</li>
- </ul>
- <p>Note: These alternatives are also not authorized by WhatsApp Inc., so they may also pose the same risks and drawbacks as pink WhatsApp. You should use them at your own risk and discretion.</p>
- <li><b>How can I uninstall pink WhatsApp?</b></li>
- <p>If you want to uninstall pink WhatsApp from your device, you can follow these steps:</p>
- <ol>
- <li>Go to Settings > Apps > Pink WhatsApp.</li>
- <li>Tap on Uninstall and confirm your choice.</li>
- <li>Wait for the uninstallation to complete and remove the app icon from your home screen or app drawer.</li>
- </ol>
- <p>You can also delete any remaining files or folders related to pink WhatsApp from your device's storage. If you want to switch back to the original app, you can download it from the Google Play Store and verify your phone number again.</p>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/fastdeploy_utils.py DELETED
@@ -1,260 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Inc. team.
- # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import os
- import shutil
- from pathlib import Path
- from typing import Optional, Union
-
- import numpy as np
-
- from .download_utils import ppdiffusers_bos_download
- from .utils import (
-     FASTDEPLOY_MODEL_NAME,
-     FASTDEPLOY_WEIGHTS_NAME,
-     is_fastdeploy_available,
-     is_paddle_available,
-     logging,
- )
-
- if is_paddle_available():
-     import paddle
-
-
- if is_fastdeploy_available():
-     import fastdeploy as fd
-
-     def fdtensor2pdtensor(fdtensor: fd.C.FDTensor):
-         dltensor = fdtensor.to_dlpack()
-         pdtensor = paddle.utils.dlpack.from_dlpack(dltensor)
-         return pdtensor
-
-     def pdtensor2fdtensor(pdtensor: paddle.Tensor, name: str = "", share_with_raw_ptr=False):
-         if not share_with_raw_ptr:
-             dltensor = paddle.utils.dlpack.to_dlpack(pdtensor)
-             return fd.C.FDTensor.from_dlpack(name, dltensor)
-         else:
-             return fd.C.FDTensor.from_external_data(
-                 name,
-                 pdtensor.data_ptr(),
-                 pdtensor.shape,
-                 pdtensor.dtype.name,
-                 str(pdtensor.place),
-                 int(pdtensor.place.gpu_device_id()),
-             )
-
-
- logger = logging.get_logger(__name__)
-
-
- class FastDeployRuntimeModel:
-     def __init__(self, model=None, **kwargs):
-         logger.info("`ppdiffusers.FastDeployRuntimeModel` is experimental and might change in the future.")
-         self.model = model
-         self.model_save_dir = kwargs.get("model_save_dir", None)
-         self.latest_model_name = kwargs.get("latest_model_name", "inference.pdmodel")
-         self.latest_params_name = kwargs.get("latest_params_name", "inference.pdiparams")
-
-     def zero_copy_infer(self, prebinded_inputs: dict, prebinded_outputs: dict, share_with_raw_ptr=True, **kwargs):
-         """
-         Execute inference without copying data from cpu to gpu.
-
-         Arguments:
-             kwargs (`dict(name, paddle.Tensor)`):
-                 An input map from name to tensor.
-         Return:
-             List of output tensor.
-         """
-         for inputs_name, inputs_tensor in prebinded_inputs.items():
-             input_fdtensor = pdtensor2fdtensor(inputs_tensor, inputs_name, share_with_raw_ptr=share_with_raw_ptr)
-             self.model.bind_input_tensor(inputs_name, input_fdtensor)
-
-         for outputs_name, outputs_tensor in prebinded_outputs.items():
-             output_fdtensor = pdtensor2fdtensor(outputs_tensor, outputs_name, share_with_raw_ptr=share_with_raw_ptr)
-             self.model.bind_output_tensor(outputs_name, output_fdtensor)
-
-         self.model.zero_copy_infer()
-
-     def __call__(self, **kwargs):
-         inputs = {k: np.array(v) for k, v in kwargs.items()}
-         return self.model.infer(inputs)
-
-     @staticmethod
-     def load_model(
-         model_path: Union[str, Path],
-         params_path: Union[str, Path],
-         runtime_options: Optional["fd.RuntimeOption"] = None,
-     ):
-         """
-         Loads an FastDeploy Inference Model with fastdeploy.RuntimeOption
-
-         Arguments:
-             model_path (`str` or `Path`):
-                 Model path from which to load
-             params_path (`str` or `Path`):
-                 Params path from which to load
-             runtime_options (fd.RuntimeOption, *optional*):
-                 The RuntimeOption of fastdeploy to initialize the fastdeploy runtime. Default setting
-                 the device to cpu and the backend to paddle inference
-         """
-         option = runtime_options
-         if option is None or not isinstance(runtime_options, fd.RuntimeOption):
-             logger.info("No fastdeploy.RuntimeOption specified, using CPU device and paddle inference backend.")
-             option = fd.RuntimeOption()
-             option.use_paddle_backend()
-             option.use_cpu()
-         option.set_model_path(model_path, params_path)
-         return fd.Runtime(option)
-
-     def _save_pretrained(
-         self,
-         save_directory: Union[str, Path],
-         model_file_name: Optional[str] = None,
-         params_file_name: Optional[str] = None,
-         **kwargs
-     ):
-         """
-         Save a model and its configuration file to a directory, so that it can be re-loaded using the
-         [`~FastDeployRuntimeModel.from_pretrained`] class method. It will always save the
-         latest_model_name.
-
-         Arguments:
-             save_directory (`str` or `Path`):
-                 Directory where to save the model file.
-             model_file_name(`str`, *optional*):
-                 Overwrites the default model file name from `"inference.pdmodel"` to `model_file_name`. This allows you to save the
-                 model with a different name.
-             params_file_name(`str`, *optional*):
-                 Overwrites the default model file name from `"inference.pdiparams"` to `params_file_name`. This allows you to save the
-                 model with a different name.
-         """
-
-         model_file_name = model_file_name if model_file_name is not None else FASTDEPLOY_MODEL_NAME
-         params_file_name = params_file_name if params_file_name is not None else FASTDEPLOY_WEIGHTS_NAME
-
-         src_model_path = self.model_save_dir.joinpath(self.latest_model_name)
-         dst_model_path = Path(save_directory).joinpath(model_file_name)
-
-         src_params_path = self.model_save_dir.joinpath(self.latest_params_name)
-         dst_params_path = Path(save_directory).joinpath(params_file_name)
-         try:
-             shutil.copyfile(src_model_path, dst_model_path)
-             shutil.copyfile(src_params_path, dst_params_path)
-         except shutil.SameFileError:
-             pass
-
-     def save_pretrained(
-         self,
-         save_directory: Union[str, os.PathLike],
-         **kwargs,
-     ):
-         """
-         Save a model to a directory, so that it can be re-loaded using the [`~FastDeployRuntimeModel.from_pretrained`] class
-         method.:
-
-         Arguments:
-             save_directory (`str` or `os.PathLike`):
-                 Directory to which to save. Will be created if it doesn't exist.
-         """
-         if os.path.isfile(save_directory):
-             logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
-             return
-
-         os.makedirs(save_directory, exist_ok=True)
-
-         # saving model weights/files
-         self._save_pretrained(save_directory, **kwargs)
-
-     @classmethod
-     def _from_pretrained(
-         cls,
-         pretrained_model_name_or_path: Union[str, Path],
-         cache_dir: Optional[str] = None,
-         model_file_name: Optional[str] = None,
-         params_file_name: Optional[str] = None,
-         runtime_options: Optional["fd.RuntimeOption"] = None,
-         **kwargs,
-     ):
-         """
-         Load a model from a directory or the BOS.
-
-         Arguments:
-             pretrained_model_name_or_path (`str` or `Path`):
-                 Directory from which to load
-             cache_dir (`Union[str, Path]`, *optional*):
-                 Path to a directory in which a downloaded pretrained model configuration should be cached if the
-                 standard cache should not be used.
-             model_file_name (`str`):
-                 Overwrites the default model file name from `"inference.pdmodel"` to `file_name`. This allows you to load
-                 different model files from the same repository or directory.
-             params_file_name (`str`):
-                 Overwrites the default params file name from `"inference.pdiparams"` to `file_name`. This allows you to load
-                 different model files from the same repository or directory.
-             runtime_options (`fastdeploy.RuntimeOption`, *optional*):
-                 The RuntimeOption of fastdeploy.
-             kwargs (`Dict`, *optional*):
-                 kwargs will be passed to the model during initialization
-         """
-         model_file_name = model_file_name if model_file_name is not None else FASTDEPLOY_MODEL_NAME
-         params_file_name = params_file_name if params_file_name is not None else FASTDEPLOY_WEIGHTS_NAME
-         # load model from local directory
-         if os.path.isdir(pretrained_model_name_or_path):
-             model = FastDeployRuntimeModel.load_model(
-                 os.path.join(pretrained_model_name_or_path, model_file_name),
-                 os.path.join(pretrained_model_name_or_path, params_file_name),
-                 runtime_options=runtime_options,
-             )
-             kwargs["model_save_dir"] = Path(pretrained_model_name_or_path)
-         # load model from hub
-         else:
-             # download model
-             model_cache_path = ppdiffusers_bos_download(
-                 pretrained_model_name_or_path=pretrained_model_name_or_path,
-                 filename=model_file_name,
-                 cache_dir=cache_dir,
-             )
-             # download params
-             params_cache_path = ppdiffusers_bos_download(
-                 pretrained_model_name_or_path=pretrained_model_name_or_path,
-                 filename=params_file_name,
-                 cache_dir=cache_dir,
-             )
-             kwargs["model_save_dir"] = Path(model_cache_path).parent
-             kwargs["latest_model_name"] = Path(model_cache_path).name
-             kwargs["latest_params_name"] = Path(params_cache_path).name
-             model = FastDeployRuntimeModel.load_model(
-                 model_cache_path, params_cache_path, runtime_options=runtime_options
-             )
-         return cls(model=model, **kwargs)
-
-     @classmethod
-     def from_pretrained(
-         cls,
-         pretrained_model_name_or_path: Union[str, Path],
-         cache_dir: Optional[str] = None,
-         model_file_name: Optional[str] = None,
-         params_file_name: Optional[str] = None,
-         runtime_options: Optional["fd.RuntimeOption"] = None,
-         **model_kwargs,
-     ):
-         return cls._from_pretrained(
-             pretrained_model_name_or_path=pretrained_model_name_or_path,
255
- cache_dir=cache_dir,
256
- model_file_name=model_file_name,
257
- params_file_name=params_file_name,
258
- runtime_options=runtime_options,
259
- **model_kwargs,
260
- )
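For reference, the deleted class above is driven through `from_pretrained` and `__call__`. Below is a minimal usage sketch: it assumes `fastdeploy` is installed, and the export directory and the `sample` input name are hypothetical placeholders that must match the exported model's actual inputs.

```python
import fastdeploy as fd
import numpy as np

# Hypothetical local export containing inference.pdmodel / inference.pdiparams
# (the default file names used by the class above).
option = fd.RuntimeOption()
option.use_cpu()
option.use_paddle_backend()

model = FastDeployRuntimeModel.from_pretrained(
    "./exported_unet",          # hypothetical path
    runtime_options=option,
)

# __call__ converts keyword arguments to numpy arrays and calls self.model.infer.
sample = np.random.randn(1, 4, 64, 64).astype("float32")
outputs = model(sample=sample)  # "sample" must be a real input name of the model
```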
spaces/2023Liu2023/bingo/postcss.config.js DELETED
@@ -1,6 +0,0 @@
- module.exports = {
-   plugins: {
-     tailwindcss: {},
-     autoprefixer: {},
-   },
- }
spaces/44ov41za8i/FreeVC/speaker_encoder/params_model.py DELETED
@@ -1,11 +0,0 @@
-
- ## Model parameters
- model_hidden_size = 256
- model_embedding_size = 256
- model_num_layers = 3
-
-
- ## Training parameters
- learning_rate_init = 1e-4
- speakers_per_batch = 64
- utterances_per_speaker = 10
spaces/AIConsultant/MusicGen/docs/ENCODEC.md DELETED
@@ -1,179 +0,0 @@
- # EnCodec: High Fidelity Neural Audio Compression
-
- AudioCraft provides the training code for EnCodec, a state-of-the-art deep learning
- based audio codec supporting both mono and stereo audio, presented in the
- [High Fidelity Neural Audio Compression][arxiv] paper.
- Check out our [sample page][encodec_samples].
-
- ## Original EnCodec models
-
- The EnCodec models presented in High Fidelity Neural Audio Compression can be accessed
- and used with the [EnCodec repository](https://github.com/facebookresearch/encodec).
-
- **Note**: We do not guarantee compatibility between the AudioCraft and EnCodec codebases
- and released checkpoints at this stage.
-
-
- ## Installation
-
- Please follow the AudioCraft installation instructions from the [README](../README.md).
-
-
- ## Training
-
- The [CompressionSolver](../audiocraft/solvers/compression.py) implements the audio reconstruction
- task to train an EnCodec model. Specifically, it trains an encoder-decoder with a quantization
- bottleneck - a SEANet encoder-decoder with a Residual Vector Quantization bottleneck for EnCodec -
- using a combination of objective and perceptual losses in the form of discriminators.
-
- The default configuration matches a causal EnCodec training at a single bandwidth.
-
- ### Example configuration and grids
-
- We provide sample configurations and grids for training EnCodec models.
-
- The compression configurations are defined in
- [config/solver/compression](../config/solver/compression).
-
- The example grids are available at
- [audiocraft/grids/compression](../audiocraft/grids/compression).
-
- ```shell
- # base causal encodec on monophonic audio sampled at 24 khz
- dora grid compression.encodec_base_24khz
- # encodec model used for MusicGen on monophonic audio sampled at 32 khz
- dora grid compression.encodec_musicgen_32khz
- ```
-
- ### Training and valid stages
-
- The model is trained using a combination of objective and perceptual losses.
- More specifically, EnCodec is trained with the MS-STFT discriminator along with
- objective losses, through the use of a loss balancer to effectively weight
- the different losses in an intuitive manner.
-
- ### Evaluation stage
-
- Evaluation metrics for audio generation:
- * SI-SNR: Scale-Invariant Signal-to-Noise Ratio.
- * ViSQOL: Virtual Speech Quality Objective Listener.
-
- Note: the path to the ViSQOL binary (compiled with bazel) needs to be provided in
- order to run the ViSQOL metric on the reference and degraded signals.
- The metric is disabled by default.
- Please refer to the [metrics documentation](../METRICS.md) to learn more.
-
- ### Generation stage
-
- The generation stage consists of generating the reconstructed audio from samples
- with the current model. The number of samples generated and the batch size used are
- controlled by the `dataset.generate` configuration. The output path and audio formats
- are defined in the generate stage configuration.
-
- ```shell
- # generate samples every 5 epochs
- dora run solver=compression/encodec_base_24khz generate.every=5
- # run with a different dset
- dora run solver=compression/encodec_base_24khz generate.path=<PATH_IN_DORA_XP_FOLDER>
- # limit the number of samples or use a different batch size
- dora grid solver=compression/encodec_base_24khz dataset.generate.num_samples=10 dataset.generate.batch_size=4
- ```
-
- ### Playing with the model
-
- Once you have a model trained, it is possible to get the entire solver, or just
- the trained model, with the following functions:
-
- ```python
- from audiocraft.solvers import CompressionSolver
-
- # If you trained a custom model with signature SIG.
- model = CompressionSolver.model_from_checkpoint('//sig/SIG')
- # If you want to get one of the pretrained models with the `//pretrained/` prefix.
- model = CompressionSolver.model_from_checkpoint('//pretrained/facebook/encodec_32khz')
- # Or load from a custom checkpoint path
- model = CompressionSolver.model_from_checkpoint('/my_checkpoints/foo/bar/checkpoint.th')
-
-
- # If you only want to use a pretrained model, you can also directly get it
- # from the CompressionModel base model class.
- from audiocraft.models import CompressionModel
-
- # Here do not put the `//pretrained/` prefix!
- model = CompressionModel.get_pretrained('facebook/encodec_32khz')
- model = CompressionModel.get_pretrained('dac_44khz')
-
- # Finally, you can also retrieve the full Solver object, with its dataloader etc.
- from audiocraft import train
- from pathlib import Path
- import logging
- import os
- import sys
-
- # uncomment the following line if you want some detailed logs when loading a Solver.
- logging.basicConfig(stream=sys.stderr, level=logging.INFO)
- # You must always run the following function from the root directory.
- os.chdir(Path(train.__file__).parent.parent)
-
-
- # You can also get the full solver (only for your own experiments).
- # You can provide some overrides to the parameters to make things more convenient.
- solver = train.get_solver_from_sig('SIG', {'device': 'cpu', 'dataset': {'batch_size': 8}})
- solver.model
- solver.dataloaders
- ```
-
- ### Importing / Exporting models
-
- At the moment we do not have a definitive workflow for exporting EnCodec models, for
- instance to Hugging Face (HF). We are working on supporting automatic conversion between
- the AudioCraft and Hugging Face implementations.
-
- We still have some support for fine-tuning an EnCodec model coming from HF in AudioCraft,
- using for instance `continue_from=//pretrained/facebook/encodec_32k`.
-
- An AudioCraft checkpoint can be exported in a more compact format (excluding the optimizer etc.)
- using `audiocraft.utils.export.export_encodec`. For instance, you could run
-
- ```python
- from audiocraft.utils import export
- from audiocraft import train
- xp = train.main.get_xp_from_sig('SIG')
- export.export_encodec(
-     xp.folder / 'checkpoint.th',
-     '/checkpoints/my_audio_lm/compression_state_dict.bin')
-
-
- from audiocraft.models import CompressionModel
- model = CompressionModel.get_pretrained('/checkpoints/my_audio_lm/compression_state_dict.bin')
-
- from audiocraft.solvers import CompressionSolver
- # The two are strictly equivalent, but this function also supports loading from models that have not been exported yet.
- model = CompressionSolver.model_from_checkpoint('//pretrained//checkpoints/my_audio_lm/compression_state_dict.bin')
- ```
-
- We will then see how to use this model as a tokenizer for MusicGen/AudioGen in the
- [MusicGen documentation](./MUSICGEN.md).
-
- ### Learn more
-
- Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md).
-
-
- ## Citation
- ```
- @article{defossez2022highfi,
-   title={High Fidelity Neural Audio Compression},
-   author={Défossez, Alexandre and Copet, Jade and Synnaeve, Gabriel and Adi, Yossi},
-   journal={arXiv preprint arXiv:2210.13438},
-   year={2022}
- }
- ```
-
-
- ## License
-
- See license information in the [README](../README.md).
-
- [arxiv]: https://arxiv.org/abs/2210.13438
- [encodec_samples]: https://ai.honu.io/papers/encodec/samples.html
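As a side note on the "Playing with the model" section of the deleted doc above, a loaded model can be exercised roughly as follows. This is a sketch only: it assumes `audiocraft` is installed, and the `encode`/`decode` method names follow the `CompressionModel` interface rather than anything stated in the doc itself.

```python
import torch
from audiocraft.models import CompressionModel

model = CompressionModel.get_pretrained('facebook/encodec_32khz')
# About one second of noise, shaped (batch, channels, samples).
wav = torch.randn(1, model.channels, model.sample_rate)

with torch.no_grad():
    codes, scale = model.encode(wav)             # discrete RVQ codes
    reconstruction = model.decode(codes, scale)  # waveform back from codes
```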
spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/dataset_tokenize.py DELETED
@@ -1,117 +0,0 @@
- import torch
- from torch.utils import data
- import numpy as np
- from os.path import join as pjoin
- import random
- import codecs as cs
- from tqdm import tqdm
-
-
-
- class VQMotionDataset(data.Dataset):
-     def __init__(self, dataset_name, feat_bias = 5, window_size = 64, unit_length = 8):
-         self.window_size = window_size
-         self.unit_length = unit_length
-         self.feat_bias = feat_bias
-
-         self.dataset_name = dataset_name
-         min_motion_len = 40 if dataset_name == 't2m' else 24
-
-         if dataset_name == 't2m':
-             self.data_root = './dataset/HumanML3D'
-             self.motion_dir = pjoin(self.data_root, 'new_joint_vecs')
-             self.text_dir = pjoin(self.data_root, 'texts')
-             self.joints_num = 22
-             radius = 4
-             fps = 20
-             self.max_motion_length = 196
-             dim_pose = 263
-             self.meta_dir = 'checkpoints/t2m/VQVAEV3_CB1024_CMT_H1024_NRES3/meta'
-             #kinematic_chain = paramUtil.t2m_kinematic_chain
-         elif dataset_name == 'kit':
-             self.data_root = './dataset/KIT-ML'
-             self.motion_dir = pjoin(self.data_root, 'new_joint_vecs')
-             self.text_dir = pjoin(self.data_root, 'texts')
-             self.joints_num = 21
-             radius = 240 * 8
-             fps = 12.5
-             dim_pose = 251
-             self.max_motion_length = 196
-             self.meta_dir = 'checkpoints/kit/VQVAEV3_CB1024_CMT_H1024_NRES3/meta'
-             #kinematic_chain = paramUtil.kit_kinematic_chain
-
-         joints_num = self.joints_num
-
-         mean = np.load(pjoin(self.meta_dir, 'mean.npy'))
-         std = np.load(pjoin(self.meta_dir, 'std.npy'))
-
-         split_file = pjoin(self.data_root, 'train.txt')
-
-         data_dict = {}
-         id_list = []
-         with cs.open(split_file, 'r') as f:
-             for line in f.readlines():
-                 id_list.append(line.strip())
-
-         new_name_list = []
-         length_list = []
-         for name in tqdm(id_list):
-             try:
-                 motion = np.load(pjoin(self.motion_dir, name + '.npy'))
-                 if (len(motion)) < min_motion_len or (len(motion) >= 200):
-                     continue
-
-                 data_dict[name] = {'motion': motion,
-                                    'length': len(motion),
-                                    'name': name}
-                 new_name_list.append(name)
-                 length_list.append(len(motion))
-             except:
-                 # Some motion may not exist in KIT dataset
-                 pass
-
-
-         self.mean = mean
-         self.std = std
-         self.length_arr = np.array(length_list)
-         self.data_dict = data_dict
-         self.name_list = new_name_list
-
-     def inv_transform(self, data):
-         return data * self.std + self.mean
-
-     def __len__(self):
-         return len(self.data_dict)
-
-     def __getitem__(self, item):
-         name = self.name_list[item]
-         data = self.data_dict[name]
-         motion, m_length = data['motion'], data['length']
-
-         m_length = (m_length // self.unit_length) * self.unit_length
-
-         idx = random.randint(0, len(motion) - m_length)
-         motion = motion[idx:idx+m_length]
-
-         "Z Normalization"
-         motion = (motion - self.mean) / self.std
-
-         return motion, name
-
- def DATALoader(dataset_name,
-                batch_size = 1,
-                num_workers = 8, unit_length = 4):
-
-     train_loader = torch.utils.data.DataLoader(VQMotionDataset(dataset_name, unit_length=unit_length),
-                                                batch_size,
-                                                shuffle=True,
-                                                num_workers=num_workers,
-                                                #collate_fn=collate_fn,
-                                                drop_last = True)
-
-     return train_loader
-
- def cycle(iterable):
-     while True:
-         for x in iterable:
-             yield x
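A minimal sketch of how the deleted helpers fit together. It assumes the HumanML3D files referenced by `VQMotionDataset` exist on disk; `batch_size=1` is used because motion clips have varying lengths and no custom collate function is set.

```python
# Build a loader over the tokenization split and wrap it for step-based training.
loader = DATALoader('t2m', batch_size=1, unit_length=4)
data_iter = cycle(loader)  # infinite iterator, as used by step-based loops

# motion: shape (1, length, 263), Z-normalized features; name: 1-tuple of clip id.
motion, name = next(data_iter)
```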
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/__init__.py DELETED
@@ -1,7 +0,0 @@
- from ldm.modules.losses_audio.vqperceptual import DummyLoss
-
- # relative imports pain
- import os
- import sys
- path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggishish')
- sys.path.append(path)
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/loss.py DELETED
@@ -1,41 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
-
- class WeightedCrossEntropy(nn.CrossEntropyLoss):
-
-     def __init__(self, weights, **pytorch_ce_loss_args) -> None:
-         super().__init__(reduction='none', **pytorch_ce_loss_args)
-         self.weights = weights
-
-     def __call__(self, outputs, targets, to_weight=True):
-         loss = super().__call__(outputs, targets)
-         if to_weight:
-             return (loss * self.weights[targets]).sum() / self.weights[targets].sum()
-         else:
-             return loss.mean()
-
-
- if __name__ == '__main__':
-     x = torch.randn(10, 5)
-     target = torch.randint(0, 5, (10,))
-     weights = torch.tensor([1., 2., 3., 4., 5.])
-
-     # criterion_weighted = nn.CrossEntropyLoss(weight=weights)
-     # loss_weighted = criterion_weighted(x, target)
-
-     # criterion_weighted_manual = nn.CrossEntropyLoss(reduction='none')
-     # loss_weighted_manual = criterion_weighted_manual(x, target)
-     # print(loss_weighted, loss_weighted_manual.mean())
-     # loss_weighted_manual = (loss_weighted_manual * weights[target]).sum() / weights[target].sum()
-     # print(loss_weighted, loss_weighted_manual)
-     # print(torch.allclose(loss_weighted, loss_weighted_manual))
-
-     pytorch_weighted = nn.CrossEntropyLoss(weight=weights)
-     pytorch_unweighted = nn.CrossEntropyLoss()
-     custom = WeightedCrossEntropy(weights)
-
-     assert torch.allclose(pytorch_weighted(x, target), custom(x, target, to_weight=True))
-     assert torch.allclose(pytorch_unweighted(x, target), custom(x, target, to_weight=False))
-     print(custom(x, target, to_weight=True), custom(x, target, to_weight=False))
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/__init__.py DELETED
File without changes
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/config_manager.py DELETED
@@ -1,350 +0,0 @@
- from enum import Enum
- import os
- from pathlib import Path
- import shutil
- import subprocess
- from typing import Any, Dict
-
- import ruamel.yaml
- import torch
-
- from poetry_diacritizer.models.baseline import BaseLineModel
- from poetry_diacritizer.models.cbhg import CBHGModel
- from poetry_diacritizer.models.gpt import GPTModel
- from poetry_diacritizer.models.seq2seq import Decoder as Seq2SeqDecoder, Encoder as Seq2SeqEncoder, Seq2Seq
- from poetry_diacritizer.models.tacotron_based import (
-     Decoder as TacotronDecoder,
-     Encoder as TacotronEncoder,
-     Tacotron,
- )
-
- from poetry_diacritizer.options import AttentionType, LossType, OptimizerType
- from poetry_diacritizer.util.text_encoders import (
-     ArabicEncoderWithStartSymbol,
-     BasicArabicEncoder,
-     TextEncoder,
- )
-
-
- class ConfigManager:
-     """Config Manager"""
-
-     def __init__(self, config_path: str, model_kind: str):
-         available_models = ["baseline", "cbhg", "seq2seq", "tacotron_based", "gpt"]
-         if model_kind not in available_models:
-             raise TypeError(f"model_kind must be in {available_models}")
-         self.config_path = Path(config_path)
-         self.model_kind = model_kind
-         self.yaml = ruamel.yaml.YAML()
-         self.config: Dict[str, Any] = self._load_config()
-         self.git_hash = self._get_git_hash()
-         self.session_name = ".".join(
-             [
-                 self.config["data_type"],
-                 self.config["session_name"],
-                 f"{model_kind}",
-             ]
-         )
-
-         self.data_dir = Path(
-             os.path.join(self.config["data_directory"], self.config["data_type"])
-         )
-         self.base_dir = Path(
-             os.path.join(self.config["log_directory"], self.session_name)
-         )
-         self.log_dir = Path(os.path.join(self.base_dir, "logs"))
-         self.prediction_dir = Path(os.path.join(self.base_dir, "predictions"))
-         self.plot_dir = Path(os.path.join(self.base_dir, "plots"))
-         self.models_dir = Path(os.path.join(self.base_dir, "models"))
-         if "sp_model_path" in self.config:
-             self.sp_model_path = self.config["sp_model_path"]
-         else:
-             self.sp_model_path = None
-         self.text_encoder: TextEncoder = self.get_text_encoder()
-         self.config["len_input_symbols"] = len(self.text_encoder.input_symbols)
-         self.config["len_target_symbols"] = len(self.text_encoder.target_symbols)
-         if self.model_kind in ["seq2seq", "tacotron_based"]:
-             self.config["attention_type"] = AttentionType[self.config["attention_type"]]
-         self.config["optimizer"] = OptimizerType[self.config["optimizer_type"]]
-
-     def _load_config(self):
-         with open(self.config_path, "rb") as model_yaml:
-             _config = self.yaml.load(model_yaml)
-         return _config
-
-     @staticmethod
-     def _get_git_hash():
-         try:
-             return (
-                 subprocess.check_output(["git", "describe", "--always"])
-                 .strip()
-                 .decode()
-             )
-         except Exception as e:
-             print(f"WARNING: could not retrieve git hash. {e}")
-
-     def _check_hash(self):
-         try:
-             git_hash = (
-                 subprocess.check_output(["git", "describe", "--always"])
-                 .strip()
-                 .decode()
-             )
-             if self.config["git_hash"] != git_hash:
-                 print(
-                     f"""WARNING: git hash mismatch. Current: {git_hash}.
-                     Config hash: {self.config['git_hash']}"""
-                 )
-         except Exception as e:
-             print(f"WARNING: could not check git hash. {e}")
-
-     @staticmethod
-     def _print_dict_values(values, key_name, level=0, tab_size=2):
-         tab = level * tab_size * " "
-         print(tab + "-", key_name, ":", values)
-
-     def _print_dictionary(self, dictionary, recursion_level=0):
-         for key in dictionary.keys():
-             if isinstance(key, dict):
-                 recursion_level += 1
-                 self._print_dictionary(dictionary[key], recursion_level)
-             else:
-                 self._print_dict_values(
-                     dictionary[key], key_name=key, level=recursion_level
-                 )
-
-     def print_config(self):
-         print("\nCONFIGURATION", self.session_name)
-         self._print_dictionary(self.config)
-
-     def update_config(self):
-         self.config["git_hash"] = self._get_git_hash()
-
-     def dump_config(self):
-         self.update_config()
-         _config = {}
-         for key, val in self.config.items():
-             if isinstance(val, Enum):
-                 _config[key] = val.name
-             else:
-                 _config[key] = val
-         with open(self.base_dir / "config.yml", "w") as model_yaml:
-             self.yaml.dump(_config, model_yaml)
-
-     def create_remove_dirs(
-         self,
-         clear_dir: bool = False,
-         clear_logs: bool = False,
-         clear_weights: bool = False,
-         clear_all: bool = False,
-     ):
-         self.base_dir.mkdir(exist_ok=True, parents=True)
-         self.plot_dir.mkdir(exist_ok=True)
-         self.prediction_dir.mkdir(exist_ok=True)
-         if clear_dir:
-             delete = input(f"Delete {self.log_dir} AND {self.models_dir}? (y/[n])")
-             if delete == "y":
-                 shutil.rmtree(self.log_dir, ignore_errors=True)
-                 shutil.rmtree(self.models_dir, ignore_errors=True)
-         if clear_logs:
-             delete = input(f"Delete {self.log_dir}? (y/[n])")
-             if delete == "y":
-                 shutil.rmtree(self.log_dir, ignore_errors=True)
-         if clear_weights:
-             delete = input(f"Delete {self.models_dir}? (y/[n])")
-             if delete == "y":
-                 shutil.rmtree(self.models_dir, ignore_errors=True)
-         self.log_dir.mkdir(exist_ok=True)
-         self.models_dir.mkdir(exist_ok=True)
-
-     def get_last_model_path(self):
-         """
-         Given the checkpoint directory, get the name of the last saved model
-         Args:
-             checkpoint (str): the path where models are saved
-         """
-         models = os.listdir(self.models_dir)
-         models = [model for model in models if model[-3:] == ".pt"]
-         if len(models) == 0:
-             return None
-         _max = max(int(m.split(".")[0].split("-")[0]) for m in models)
-         model_name = f"{_max}-snapshot.pt"
-         last_model_path = os.path.join(self.models_dir, model_name)
-
-         return last_model_path
-
-     def load_model(self, model_path: str = None):
-         """
-         Load a model from a path
-         Args:
-             checkpoint (str): the path to the model
-             name (str): the name of the model, which is in the path
-             model (Tacotron): the model to load its saved state
-             optimizer: the optimizer to load its saved state
-         """
-
-         model = self.get_model()
-
-         with open(self.base_dir / f"{self.model_kind}_network.txt", "w") as file:
-             file.write(str(model))
-
-         if model_path is None:
-             last_model_path = self.get_last_model_path()
-             if last_model_path is None:
-                 return model, 1
-         else:
-             last_model_path = model_path
-
-         saved_model = torch.load(last_model_path)
-         out = model.load_state_dict(saved_model["model_state_dict"])
-         print(out)
-         global_step = saved_model["global_step"] + 1
-         return model, global_step
-
-     def get_model(self, ignore_hash=False):
-         if not ignore_hash:
-             self._check_hash()
-         if self.model_kind == "cbhg":
-             return self.get_cbhg()
-
-         elif self.model_kind == "seq2seq":
-             return self.get_seq2seq()
-
-         elif self.model_kind == "tacotron_based":
-             return self.get_tacotron_based()
-
-         elif self.model_kind == "baseline":
-             return self.get_baseline()
-
-         elif self.model_kind == "gpt":
-             return self.get_gpt()
-
-     def get_gpt(self):
-         model = GPTModel(
-             self.config["base_model_path"],
-             freeze=self.config["freeze"],
-             n_layer=self.config["n_layer"],
-             use_lstm=self.config["use_lstm"],
-         )
-         return model
-
-     def get_baseline(self):
-         model = BaseLineModel(
-             embedding_dim=self.config["embedding_dim"],
-             inp_vocab_size=self.config["len_input_symbols"],
-             targ_vocab_size=self.config["len_target_symbols"],
-             layers_units=self.config["layers_units"],
-             use_batch_norm=self.config["use_batch_norm"],
-         )
-
-         return model
-
-     def get_cbhg(self):
-         model = CBHGModel(
-             embedding_dim=self.config["embedding_dim"],
-             inp_vocab_size=self.config["len_input_symbols"],
-             targ_vocab_size=self.config["len_target_symbols"],
-             use_prenet=self.config["use_prenet"],
-             prenet_sizes=self.config["prenet_sizes"],
-             cbhg_gru_units=self.config["cbhg_gru_units"],
-             cbhg_filters=self.config["cbhg_filters"],
-             cbhg_projections=self.config["cbhg_projections"],
-             post_cbhg_layers_units=self.config["post_cbhg_layers_units"],
-             post_cbhg_use_batch_norm=self.config["post_cbhg_use_batch_norm"],
-         )
-
-         return model
-
-     def get_seq2seq(self):
-         encoder = Seq2SeqEncoder(
-             embedding_dim=self.config["encoder_embedding_dim"],
-             inp_vocab_size=self.config["len_input_symbols"],
-             layers_units=self.config["encoder_units"],
-             use_batch_norm=self.config["use_batch_norm"],
-         )
-
-         decoder = TacotronDecoder(
-             self.config["len_target_symbols"],
-             start_symbol_id=self.text_encoder.start_symbol_id,
-             embedding_dim=self.config["decoder_embedding_dim"],
-             encoder_dim=self.config["encoder_dim"],
-             decoder_units=self.config["decoder_units"],
-             decoder_layers=self.config["decoder_layers"],
-             attention_type=self.config["attention_type"],
-             attention_units=self.config["attention_units"],
-             is_attention_accumulative=self.config["is_attention_accumulative"],
-             use_prenet=self.config["use_decoder_prenet"],
-             prenet_depth=self.config["decoder_prenet_depth"],
-             teacher_forcing_probability=self.config["teacher_forcing_probability"],
-         )
-
-         model = Tacotron(encoder=encoder, decoder=decoder)
-
-         return model
-
-     def get_tacotron_based(self):
-         encoder = TacotronEncoder(
-             embedding_dim=self.config["encoder_embedding_dim"],
-             inp_vocab_size=self.config["len_input_symbols"],
-             prenet_sizes=self.config["prenet_sizes"],
-             use_prenet=self.config["use_encoder_prenet"],
-             cbhg_gru_units=self.config["cbhg_gru_units"],
-             cbhg_filters=self.config["cbhg_filters"],
-             cbhg_projections=self.config["cbhg_projections"],
-         )
-
-         decoder = TacotronDecoder(
-             self.config["len_target_symbols"],
-             start_symbol_id=self.text_encoder.start_symbol_id,
-             embedding_dim=self.config["decoder_embedding_dim"],
-             encoder_dim=self.config["encoder_dim"],
-             decoder_units=self.config["decoder_units"],
-             decoder_layers=self.config["decoder_layers"],
-             attention_type=self.config["attention_type"],
-             attention_units=self.config["attention_units"],
-             is_attention_accumulative=self.config["is_attention_accumulative"],
-             use_prenet=self.config["use_decoder_prenet"],
-             prenet_depth=self.config["decoder_prenet_depth"],
-             teacher_forcing_probability=self.config["teacher_forcing_probability"],
-         )
-
-         model = Tacotron(encoder=encoder, decoder=decoder)
-
-         return model
-
-     def get_text_encoder(self):
-         """Getting the class of TextEncoder from config"""
-         if self.config["text_cleaner"] not in [
-             "basic_cleaners",
-             "valid_arabic_cleaners",
-             None,
-         ]:
-             raise Exception(f"cleaner is not known {self.config['text_cleaner']}")
-
-         if self.config["text_encoder"] == "BasicArabicEncoder":
-             text_encoder = BasicArabicEncoder(
-                 cleaner_fn=self.config["text_cleaner"], sp_model_path=self.sp_model_path
-             )
-         elif self.config["text_encoder"] == "ArabicEncoderWithStartSymbol":
-             text_encoder = ArabicEncoderWithStartSymbol(
-                 cleaner_fn=self.config["text_cleaner"], sp_model_path=self.sp_model_path
-             )
-         else:
-             raise Exception(
-                 f"the text encoder is not found {self.config['text_encoder']}"
-             )
-
-         return text_encoder
-
-     def get_loss_type(self):
-         try:
-             loss_type = LossType[self.config["loss_type"]]
-         except KeyError:
-             raise Exception(f"The loss type is not correct {self.config['loss_type']}")
-         return loss_type
-
-
- if __name__ == "__main__":
-     config_path = "config/tacotron-base-config.yml"
-     model_kind = "tacotron"
-     config = ConfigManager(config_path=config_path, model_kind=model_kind)
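Beyond the `__main__` block above, a typical checkpoint-restoring session with the deleted class looks like the following sketch. The config path is illustrative; `print_config` and `load_model` are taken directly from the code above.

```python
# Sketch: construct a manager and restore the most recent checkpoint, if any.
config = ConfigManager(config_path="config/cbhg-config.yml", model_kind="cbhg")
config.print_config()

# Returns (model, 1) when models_dir holds no "*-snapshot.pt" checkpoint,
# otherwise (model, saved_global_step + 1).
model, global_step = config.load_model()
```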
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/Methods.js DELETED
@@ -1,108 +0,0 @@
- import GetSizerConfig from './GetSizerConfig.js';
- import GetChildPrevState from '../utils/GetChildPrevState.js';
- import PushIntoBounds from './PushIntoBounds.js';
- import DrawBounds from './DrawBounds.js';
- import AddChildMethods from './AddChildMethods.js';
- import RemoveChildMethods from './RemoveChildMethods.js';
- import AddChildrenMap from './AddChildrenMap.js';
- import RemoveChildrenMap from './RemoveChildrenMap.js';
- import GetElement from './GetElement.js';
- import PaddingMethods from './PaddingMethods.js';
- import ResolveWidth from './ResolveWidth.js';
- import ResolveChildrenWidth from './ResolveChildrenWidth.js';
- import ResolveHeight from './ResolveHeight.js';
- import PostResolveSize from './PostResolveSize.js';
- import GetChildWidth from './GetChildWidth.js';
- import GetChildHeight from './GetChildHeight.js';
- import GetExpandedChildWidth from './GetExpandedChildWidth.js';
- import GetExpandedChildHeight from './GetExpandedChildHeight.js';
- import GetChildrenWidth from './GetChildrenWidth.js';
- import GetChildrenHeight from './GetChildrenHeight.js';
- import GetAllChildrenSizers from './GetAllChildrenSizers.js';
- import GetChildrenSizers from './GetChildrenSizers.js';
- import GetShownChildrenMethods from './GetShownChildrenMethods.js';
- import PreLayout from './PreLayout.js';
- import Layout from './Layout.js';
- import RunLayout from './RunLayout.js';
- import LayoutChildren from './LayoutChildren.js';
- import PostLayout from './PostLayout.js';
- import RunWidthWrap from './RunWidthWrap.js';
-
- import SetAnchor from './SetAnchor.js';
- import ScaleMethods from './ScaleMethods.js';
- import FadeMethods from './FadeMethods.js';
- import EaseMoveMethods from './EaseMoveMethods.js';
- import ShakeMethods from './ShakeMethods.js';
- import EaseDataMethods from './EaseDataMethods.js';
- import HideMethods from './HideMethods.js';
- import ModalMethods from './ModalMethods.js';
- import IsInTouching from './IsInTouching.js';
- import PointToChild from './PointToChild.js';
- import GetParentSizerMethods from './GetParentSizerMethods.js';
- import LayoutBackgrounds from './LayoutBackgrounds.js';
- import SetDraggable from './SetDraggable.js';
- import ClickMethods from './ClickMethods.js';
- import ClickOutsideMethods from './ClickOutsideMethods.js';
- import TouchingMethods from './TouchingMethods.js';
- import SetChildrenInteractive from './SetChildrenInteractive.js';
- import BroadcastEvent from './BroadcastEvent.js';
-
- var methods = {
-     getSizerConfig: GetSizerConfig,
-     getChildPrevState: GetChildPrevState,
-     pushIntoBounds: PushIntoBounds,
-     drawBounds: DrawBounds,
-     resolveWidth: ResolveWidth,
-     resolveChildrenWidth: ResolveChildrenWidth,
-     resolveHeight: ResolveHeight,
-     postResolveSize: PostResolveSize,
-     getChildWidth: GetChildWidth,
-     getChildHeight: GetChildHeight,
-     getExpandedChildWidth: GetExpandedChildWidth,
-     getExpandedChildHeight: GetExpandedChildHeight,
-
-     getChildrenWidth: GetChildrenWidth,
-     getChildrenHeight: GetChildrenHeight,
-     addChildrenMap: AddChildrenMap,
-     addElement: AddChildrenMap,
-     removeChildrenMap: RemoveChildrenMap,
-     getElement: GetElement,
-     getAllChildrenSizers: GetAllChildrenSizers,
-     getChildrenSizers: GetChildrenSizers,
-     preLayout: PreLayout,
-     layout: Layout,
-     runLayout: RunLayout,
-     layoutChildren: LayoutChildren,
-     runWidthWrap: RunWidthWrap,
-     layoutBackgrounds: LayoutBackgrounds,
-     postLayout: PostLayout,
-
-     setAnchor: SetAnchor,
-     isInTouching: IsInTouching,
-     pointToChild: PointToChild,
-     setDraggable: SetDraggable,
-     setChildrenInteractive: SetChildrenInteractive,
-     broadcastEvent: BroadcastEvent,
-
- };
-
- Object.assign(
-     methods,
-     PaddingMethods,
-     AddChildMethods,
-     RemoveChildMethods,
-     GetParentSizerMethods,
-     ScaleMethods,
-     FadeMethods,
-     EaseMoveMethods,
-     ShakeMethods,
-     EaseDataMethods,
-     ClickMethods,
-     ClickOutsideMethods,
-     TouchingMethods,
-     HideMethods,
-     ModalMethods,
-     GetShownChildrenMethods,
- );
-
- export default methods;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/LayoutChildren.js DELETED
@@ -1,98 +0,0 @@
- import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js';
- import PreLayoutChild from '../basesizer/utils/PreLayoutChild.js';
- import LayoutChild from '../basesizer/utils/LayoutChild.js';
- import { GetDisplayWidth, GetDisplayHeight } from '../../../plugins/utils/size/GetDisplaySize.js';
- import CheckSize from '../basesizer/utils/CheckSize.js';
-
- const Wrap = Phaser.Math.Wrap;
-
- var LayoutChildren = function () {
-     var children = this.sizerChildren;
-     var child, childConfig, padding;
-     var startX = this.innerLeft,
-         startY = this.innerTop;
-     var innerWidth = this.innerWidth;
-     var innerHeight = this.innerHeight;
-     var itemX = startX,
-         itemY = startY;
-     var x, y, width, height; // Align zone
-     var childWidth, childHeight;
-     var childIndex, startChildIndex = this.startChildIndex;
-     for (var i = 0, cnt = children.length; i < cnt; i++) {
-         if (startChildIndex === 0) {
-             childIndex = i;
-         } else {
-             childIndex = Wrap((i + startChildIndex), 0, cnt);
-         }
-
-         if (this.rtl) {
-             childIndex = cnt - childIndex - 1;
-         }
-
-         child = children[childIndex];
-         if (child.rexSizer.hidden) {
-             continue;
-         }
-
-         childConfig = child.rexSizer;
-         padding = childConfig.padding;
-
-         PreLayoutChild.call(this, child);
-
-         // Set size
-         if (child.isRexSpace) {
-             childWidth = 0;
-             childHeight = 0;
-         } else {
-             childWidth = this.getExpandedChildWidth(child);
-             childHeight = this.getExpandedChildHeight(child);
-         }
-         if (child.isRexSizer) {
-             child.runLayout(this, childWidth, childHeight);
-             CheckSize(child, this);
-         } else {
-             ResizeGameObject(child, childWidth, childHeight);
-         }
-
-         if (childWidth === undefined) {
-             childWidth = GetDisplayWidth(child);
-         }
-         if (childHeight === undefined) {
-             childHeight = GetDisplayHeight(child);
-         }
-
-         // Set position
-         if (this.orientation === 0) { // x
-             x = (itemX + padding.left);
-             if ((childConfig.proportion === 0) || (this.proportionLength === 0)) {
-                 width = childWidth;
-             } else {
-                 width = (childConfig.proportion * this.proportionLength);
-             }
-
-             y = (itemY + padding.top);
-             height = (innerHeight - padding.top - padding.bottom);
-         } else { // y
-             x = (itemX + padding.left);
-             width = (innerWidth - padding.left - padding.right);
-
-             y = (itemY + padding.top);
-             if ((childConfig.proportion === 0) || (this.proportionLength === 0)) {
-                 height = childHeight;
-             } else {
-                 height = (childConfig.proportion * this.proportionLength);
-             }
-         }
-
-         LayoutChild.call(this, child, x, y, width, height, childConfig.align);
-
-         if (this.orientation === 0) { // x
-             itemX += (width + padding.left + padding.right + this.space.item);
-         } else { // y
-             itemY += (height + padding.top + padding.bottom + this.space.item);
-         }
-     }
-
- }
-
- export default LayoutChildren;
spaces/AlanMars/QYL-AI-Space/modules/models/__init__.py DELETED
File without changes
spaces/AlanMars/QYL-AI-Space/modules/presets.py DELETED
@@ -1,242 +0,0 @@
- # -*- coding:utf-8 -*-
- import os
- from pathlib import Path
- import gradio as gr
- from .webui_locale import I18nAuto
-
- i18n = I18nAuto()  # internationalization
-
- CHATGLM_MODEL = None
- CHATGLM_TOKENIZER = None
- LLAMA_MODEL = None
- LLAMA_INFERENCER = None
-
- # Users
- ANONYMOUS_USER = "anonymous"
-
- # ChatGPT settings
- INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
- API_HOST = "api.openai.com"
- COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
- BALANCE_API_URL = "https://api.openai.com/dashboard/billing/credit_grants"
- USAGE_API_URL = "https://api.openai.com/dashboard/billing/usage"
- HISTORY_DIR = Path("history")
- HISTORY_DIR = "history"
- TEMPLATES_DIR = "templates"
- USERS_DIR = Path("users")
-
- # Error messages
- STANDARD_ERROR_MSG = i18n("☹️发生了错误:")  # standard prefix for error messages
- GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志")
- ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。")
- CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。")  # connection timeout
- READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。")  # read timeout
- PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。")  # proxy error
- SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。")  # SSL error
- NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。")  # API key shorter than 51 characters
- NO_INPUT_MSG = i18n("请输入对话内容。")  # no conversation content entered
- BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用")  # billing info returned by locally run models
-
- TIMEOUT_STREAMING = 60  # timeout for streaming conversations
- TIMEOUT_ALL = 200  # timeout for non-streaming conversations
- ENABLE_STREAMING_OPTION = False  # whether to show a checkbox for toggling real-time display of answers
- HIDE_MY_KEY = False  # set to True if you want to hide your API key in the UI
- CONCURRENT_COUNT = 50  # number of users allowed to use the app concurrently
-
- SIM_K = 5
- INDEX_QUERY_TEMPRATURE = 1.0
-
- CHUANHU_TITLE = i18n("启源力 AI 🤖")
-
- # CHUANHU_DESCRIPTION = i18n("原理工作室")
- CHUANHU_DESCRIPTION = i18n("")
-
- FOOTER = """<div class="versions">{versions}</div>"""
-
- APPEARANCE_SWITCHER = """
- <div style="display: flex; justify-content: space-between;">
- <span style="margin-top: 4px !important;">""" + i18n("切换亮暗色主题") + """</span>
- <span><label class="apSwitch" for="checkbox">
-     <input type="checkbox" id="checkbox">
-     <div class="apSlider"></div>
- </label></span>
- </div>
- """
-
- SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?"  # prompt used when summarizing a conversation
-
- ONLINE_MODELS = [
-     "gpt-3.5-turbo",
-     "gpt-3.5-turbo-0301",
-     "gpt-3.5-turbo-0613",
-     "gpt-3.5-turbo-16k",
-     "gpt-4",
-     "gpt-4-0314",
-     "gpt-4-0613",
-     "gpt-4-32k",
-     "gpt-4-32k-0314",
-     "xmchat",
-     "yuanai-1.0-base_10B",
-     "yuanai-1.0-translate",
-     "yuanai-1.0-dialog",
-     "yuanai-1.0-rhythm_poems",
- ]
-
- LOCAL_MODELS = [
-     "chatglm-6b",
-     "chatglm-6b-int4",
-     "chatglm-6b-int4-qe",
-     "StableLM",
-     "MOSS",
-     "llama-7b-hf",
-     "llama-13b-hf",
-     "llama-30b-hf",
-     "llama-65b-hf",
- ]
-
- if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
-     MODELS = ONLINE_MODELS
- else:
-     MODELS = ONLINE_MODELS + LOCAL_MODELS
-
- DEFAULT_MODEL = 0
-
- os.makedirs("models", exist_ok=True)
- os.makedirs("lora", exist_ok=True)
- os.makedirs("history", exist_ok=True)
- for dir_name in os.listdir("models"):
-     if os.path.isdir(os.path.join("models", dir_name)):
-         if dir_name not in MODELS:
-             MODELS.append(dir_name)
-
- MODEL_TOKEN_LIMIT = {
-     "gpt-3.5-turbo": 4096,
-     "gpt-3.5-turbo-0301": 4096,
-     "gpt-3.5-turbo-0613": 4096,
-     "gpt-3.5-turbo-16k": 16384,
-     "gpt-4": 8192,
-     "gpt-4-0314": 8192,
-     "gpt-4-32k": 32768,
-     "gpt-4-32k-0314": 32768
- }
-
- TOKEN_OFFSET = 1000  # subtracted from the model's token limit to get the soft limit; once the soft limit is reached, the app automatically tries to reduce token usage
- DEFAULT_TOKEN_LIMIT = 3000  # default token limit
- REDUCE_TOKEN_FACTOR = 0.8  # multiplied by the model's token limit to get the target token count; when reducing usage, tokens are cut to below this target
-
- REPLY_LANGUAGES = [
-     "简体中文",
-     "繁體中文",
-     "English",
-     "日本語",
-     "Español",
-     "Français",
-     "Deutsch",
-     "跟随问题语言(不稳定)"
- ]
-
- WEBSEARCH_PTOMPT_TEMPLATE = """\
- Web search results:
-
- {web_results}
- Current date: {current_date}
-
- Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
- Query: {query}
- Reply in {reply_language}
- """
-
- PROMPT_TEMPLATE = """\
- Context information is below.
- ---------------------
- {context_str}
- ---------------------
- Current date: {current_date}.
- Using the provided context information, write a comprehensive reply to the given query.
- Make sure to cite results using [number] notation after the reference.
- If the provided context information refer to multiple subjects with the same name, write separate answers for each subject.
- Use prior knowledge only if the given context didn't provide enough information.
- Answer the question: {query_str}
- Reply in {reply_language}
- """
-
- REFINE_TEMPLATE = """\
- The original question is as follows: {query_str}
- We have provided an existing answer: {existing_answer}
- We have the opportunity to refine the existing answer
- (only if needed) with some more context below.
- ------------
- {context_msg}
- ------------
- Given the new context, refine the original answer to better
- Reply in {reply_language}
- If the context isn't useful, return the original answer.
- """
-
- ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
-
- small_and_beautiful_theme = gr.themes.Soft(
-     primary_hue=gr.themes.Color(
-         c50="#EBFAF2",
-         c100="#CFF3E1",
-         c200="#A8EAC8",
-         c300="#77DEA9",
-         c400="#3FD086",
-         c500="#02C160",
-         c600="#06AE56",
-         c700="#05974E",
-         c800="#057F45",
-         c900="#04673D",
-         c950="#2E5541",
-         name="small_and_beautiful",
-     ),
-     secondary_hue=gr.themes.Color(
-         c50="#576b95",
-         c100="#576b95",
-         c200="#576b95",
-         c300="#576b95",
-         c400="#576b95",
-         c500="#576b95",
-         c600="#576b95",
-         c700="#576b95",
-         c800="#576b95",
-         c900="#576b95",
-         c950="#576b95",
-     ),
-     neutral_hue=gr.themes.Color(
-         name="gray",
-         c50="#f6f7f8",
-         # c100="#f3f4f6",
-         c100="#F2F2F2",
-         c200="#e5e7eb",
-         c300="#d1d5db",
-         c400="#B2B2B2",
-         c500="#808080",
-         c600="#636363",
-         c700="#515151",
-         c800="#393939",
-         # c900="#272727",
-         c900="#2B2B2B",
-         c950="#171717",
-     ),
-     radius_size=gr.themes.sizes.radius_sm,
- ).set(
-     # button_primary_background_fill="*primary_500",
-     button_primary_background_fill_dark="*primary_600",
-     # button_primary_background_fill_hover="*primary_400",
-     # button_primary_border_color="*primary_500",
-     button_primary_border_color_dark="*primary_600",
-     button_primary_text_color="white",
-     button_primary_text_color_dark="white",
-     button_secondary_background_fill="*neutral_100",
-     button_secondary_background_fill_hover="*neutral_50",
-     button_secondary_background_fill_dark="*neutral_900",
-     button_secondary_text_color="*neutral_800",
-     button_secondary_text_color_dark="white",
-     # background_fill_primary="#F7F7F7",
-     # background_fill_primary_dark="#1F1F1F",
-     # block_title_text_color="*primary_500",
-     block_title_background_fill_dark="*primary_900",
-     block_label_background_fill_dark="*primary_900",
-     input_background_fill="#F6F6F6",
- )
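A short sketch of how the token-limit constants above are meant to combine, per the comments accompanying them; the trimming itself happens elsewhere in the app.

```python
model_name = "gpt-3.5-turbo"
hard_limit = MODEL_TOKEN_LIMIT.get(model_name, DEFAULT_TOKEN_LIMIT)  # 4096
soft_limit = hard_limit - TOKEN_OFFSET                               # 3096
target_tokens = hard_limit * REDUCE_TOKEN_FACTOR                     # 3276.8

# Once usage passes soft_limit, history is trimmed until it drops
# below target_tokens, as described by the constants' comments.
```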
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/index.html DELETED
@@ -1,20 +0,0 @@
- <!--
-
- =========================================================
- * Vue Notus - v1.1.0 based on Tailwind Starter Kit by Creative Tim
- =========================================================
-
- * Product Page: https://www.creative-tim.com/product/vue-notus
- * Copyright 2021 Creative Tim (https://www.creative-tim.com)
- * Licensed under MIT (https://github.com/creativetimofficial/vue-notus/blob/main/LICENSE.md)
-
- * Tailwind Starter Kit Page: https://www.creative-tim.com/learning-lab/tailwind-starter-kit/presentation
-
- * Coded by Creative Tim
-
- =========================================================
-
- * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
- -->
- <!doctype html><html lang="en"><head><meta charset="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><meta name="viewport" content="width=device-width,initial-scale=1"/><link rel="shortcut icon" href="/favicon.ico"/><link rel="apple-touch-icon" sizes="76x76" href="/apple-icon.png"/><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css"/><script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/mermaid/8.6.3/mermaid.min.js"></script><script src="/live2d/live2dcubismcore.min.js"></script><title>ChatGPT-Plugin</title><script defer="defer" type="module" src="/js/chunk-vendors.cd7b5e68.js"></script><script defer="defer" type="module" src="/js/app.bf8a14e9.js"></script><link href="/css/chunk-vendors.0ede84b4.css" rel="stylesheet"><link href="/css/app.4dc5e420.css" rel="stylesheet"><script defer="defer" src="/js/chunk-vendors-legacy.9281b25c.js" nomodule></script><script defer="defer" src="/js/app-legacy.8305dfab.js" nomodule></script></head><body class="text-blueGray-700 antialiased"><noscript><strong>We're sorry but vue-notus doesn't work properly without JavaScript enabled. Please enable it to continue.</strong></noscript><div id="app"></div></body></html>
spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_cer.py DELETED
@@ -1,96 +0,0 @@
- import unittest
- from cer import CER
-
- cer = CER()
- class TestCER(unittest.TestCase):
-     def test_cer_case_sensitive(self):
-         refs = ["Magyar Országgyűlés"]
-         preds = ["Magyar Országgyűlés"]
-         # S = 2, D = 0, I = 0, N = 11, CER = 2 / 11
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.1818181818) < 1e-6)
-
-     def test_cer_whitespace(self):
-         refs = ["Farkasok voltak"]
-         preds = ["Farkasokvoltak"]
-         # S = , D = , I = 1, N = , CER = I / N
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.) < 1e-6)
-
-         refs = ["Farkasokvoltak"]
-         preds = ["Ferkasok voltak"]
-         # S = , D = 1, I = 0, N = 14, CER =
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.) < 1e-6)
-
-         # consecutive whitespaces case 1
-         refs = ["Farkasok voltak"]
-         preds = ["Farkasok voltak"]
-         # S = 0, D = 0, I = 0, N = , CER = 0
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)
-
-         # consecutive whitespaces case 2
-         refs = ["Farkasok voltak"]
-         preds = ["Farkasok voltak"]
-         # S = 0, D = 0, I = 0, N = ?, CER = 0
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)
-
-     def test_cer_sub(self):
-         refs = ["Magyar"]
-         preds = ["Megyar"]
-         # S = 1, D = 0, I = 0, N = 6, CER = 0.125
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)
-
-     def test_cer_del(self):
-         refs = ["Farkasokvoltak"]
-         preds = ["Farkasokavoltak"]
-         # S = 0, D = 1, I = 0, N = 14, CER = 0.
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.) < 1e-6)
-
-     def test_cer_insert(self):
-         refs = ["Farkasokvoltak"]
-         preds = ["Farkasokoltak"]
-         # S = 0, D = 0, I = 1, N = 14, CER = 0.
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.) < 1e-6)
-
-     def test_cer_equal(self):
-         refs = ["Magyar"]
-         char_error_rate = cer.compute(predictions=refs, references=refs)
-         self.assertEqual(char_error_rate, 0.0)
-
-     def test_cer_list_of_seqs(self):
-         # ['Eötvös Loránd University','I love my daughter']
-         refs = ["Eötvös Loránd Tudományegyetem", "szeretem a lányom"]
-         char_error_rate = cer.compute(predictions=refs, references=refs)
-         self.assertEqual(char_error_rate, 0.0)
-
-         refs = ["diák", "Az arab nyelvet könnyű megtanulni!", "autó"]
-         preds = ["dxák", "Az arab nyelvet könnyű megtanulni!", "autó"]
-         # S = 1, D = 0, I = 0, N = 28, CER = 1 / 42
-         char_error_rate = cer.compute(predictions=preds, references=refs)
-         self.assertTrue(abs(char_error_rate - 0.0238095238) < 1e-6)
-
-     def test_correlated_sentences(self):
-         # Learn artificial intelligence to secure your future
-         # Tanuljon mesterséges intelligenciát, hogy biztosítsa jövőjét
-         refs = ["Tanuljon mesterséges intelligenciát,", " hogy biztosítsa jövőjét"]
-         preds = ["Tanuljon mesterséges intelligenciát, hogy", " biztosítsa jövőjét"]
-         # S = 0, D = 0, I = 1, N = 28, CER = 2 / 60
-         # whitespace at the front of " biztosítsa jövőjét" will be stripped during preprocessing,
-         # so we need to insert 2 whitespaces
-         char_error_rate = cer.compute(predictions=preds, references=refs, concatenate_texts=True)
-         self.assertTrue(abs(char_error_rate - 0.03333333333) < 1e-6)
-
-     def test_cer_empty(self):
-         refs = [""]
-         preds = ["tök mindegy"]
-         with self.assertRaises(ValueError):
-             cer.compute(predictions=preds, references=refs)
-
- if __name__ == "__main__":
-     unittest.main()
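The S/D/I/N comments in the tests above follow the usual definition CER = (S + D + I) / N, with N the length of the reference. Below is a self-contained sketch of that computation, independent of the `cer` module under test (which may apply extra normalization such as whitespace handling).

```python
def char_error_rate(reference: str, prediction: str) -> float:
    """Levenshtein-based CER: (substitutions + deletions + insertions) / len(reference)."""
    n, m = len(reference), len(prediction)
    # dp[i][j] = edit distance between reference[:i] and prediction[:j]
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = i
    for j in range(m + 1):
        dp[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return dp[n][m] / n

# One substitution in a six-character reference: CER = 1/6.
assert abs(char_error_rate("Magyar", "Megyar") - 1 / 6) < 1e-6
```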
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_modules.py DELETED
@@ -1,390 +0,0 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

import commons
from commons import init_weights, get_padding
from ONNXVITS_transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class ConvFlow(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound)

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
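
A minimal sketch of the forward/reverse contract shared by the flow layers above (Log, Flip, ElementwiseAffine, ResidualCouplingLayer, ConvFlow): the forward pass returns (y, logdet) and the reverse pass inverts it. The import and tensor shapes are assumptions for illustration, and the module's own dependencies (commons, ONNXVITS_transforms) must be importable:

import torch
from ONNXVITS_modules import ElementwiseAffine

layer = ElementwiseAffine(channels=4)
x = torch.randn(2, 4, 10)      # [batch, channels, time]
x_mask = torch.ones(2, 1, 10)  # no padding

y, logdet = layer(x, x_mask)                # forward: transform + log-determinant
x_rec = layer(y, x_mask, reverse=True)      # reverse: exact inverse of forward
assert torch.allclose(x, x_rec, atol=1e-6)  # round trip recovers the input
print(logdet.shape)  # torch.Size([2]), one log-determinant per batch element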
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py DELETED
@@ -1,239 +0,0 @@
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
from diffusers.utils import require_torch_gpu, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_note_seq, require_onnxruntime

from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


MIDI_FILE = "./tests/fixtures/elise_format0.mid"


# The note-seq package throws an error on import because the default installed version of IPython
# is not compatible with Python 3.8, which we run in the CI.
# https://github.com/huggingface/diffusers/actions/runs/4830121056/jobs/8605954838#step:7:98
@unittest.skip("The note-seq package currently throws an error on import")
class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = SpectrogramDiffusionPipeline
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    test_attention_slicing = False

    batch_params = TOKENS_TO_AUDIO_GENERATION_PARAMS
    params = TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        notes_encoder = SpectrogramNotesEncoder(
            max_length=2048,
            vocab_size=1536,
            d_model=768,
            dropout_rate=0.1,
            num_layers=1,
            num_heads=1,
            d_kv=4,
            d_ff=2048,
            feed_forward_proj="gated-gelu",
        )

        continuous_encoder = SpectrogramContEncoder(
            input_dims=128,
            targets_context_length=256,
            d_model=768,
            dropout_rate=0.1,
            num_layers=1,
            num_heads=1,
            d_kv=4,
            d_ff=2048,
            feed_forward_proj="gated-gelu",
        )

        decoder = T5FilmDecoder(
            input_dims=128,
            targets_length=256,
            max_decoder_noise_time=20000.0,
            d_model=768,
            num_layers=1,
            num_heads=1,
            d_kv=4,
            d_ff=2048,
            dropout_rate=0.1,
        )

        scheduler = DDPMScheduler()

        components = {
            "notes_encoder": notes_encoder.eval(),
            "continuous_encoder": continuous_encoder.eval(),
            "decoder": decoder.eval(),
            "scheduler": scheduler,
            "melgan": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "input_tokens": [
                [1134, 90, 1135, 1133, 1080, 112, 1132, 1080, 1133, 1079, 133, 1132, 1079, 1133, 1] + [0] * 2033
            ],
            "generator": generator,
            "num_inference_steps": 4,
            "output_type": "mel",
        }
        return inputs

    def test_spectrogram_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = SpectrogramDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        mel = output.audios

        mel_slice = mel[0, -3:, -3:]

        assert mel_slice.shape == (3, 3)
        expected_slice = np.array(
            [-11.512925, -4.788215, -0.46172905, -2.051715, -10.539147, -10.970963, -9.091634, 4.0, 4.0]
        )
        assert np.abs(mel_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        pass

    def test_inference_batch_consistent(self):
        pass

    @skip_mps
    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@require_torch_gpu
@require_onnxruntime
@require_note_seq
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_callback(self):
        # TODO - test that pipeline can decode tokens in a callback
        # so that music can be played live
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        melgan = pipe.melgan
        pipe.melgan = None

        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        def callback(step, mel_output):
            # decode mel to audio
            audio = melgan(input_features=mel_output.astype(np.float32))[0]
            assert len(audio[0]) == 81920 * (step + 1)
            # simulate that audio is played
            return audio

        processor = MidiProcessor()
        input_tokens = processor(MIDI_FILE)

        input_tokens = input_tokens[:3]
        generator = torch.manual_seed(0)
        pipe(input_tokens, num_inference_steps=5, generator=generator, callback=callback, output_type="mel")

    def test_spectrogram_fast(self):
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        processor = MidiProcessor()

        input_tokens = processor(MIDI_FILE)
        # just run two denoising loops
        input_tokens = input_tokens[:2]

        generator = torch.manual_seed(0)
        output = pipe(input_tokens, num_inference_steps=2, generator=generator)

        audio = output.audios[0]

        assert abs(np.abs(audio).sum() - 3612.841) < 1e-1

    def test_spectrogram(self):
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        processor = MidiProcessor()

        input_tokens = processor(MIDI_FILE)

        # just run 4 denoising loops
        input_tokens = input_tokens[:4]

        generator = torch.manual_seed(0)
        output = pipe(input_tokens, num_inference_steps=100, generator=generator)

        audio = output.audios[0]
        assert abs(np.abs(audio).sum() - 9389.1111) < 5e-2
spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py DELETED
@@ -1,36 +0,0 @@
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w32',
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256)))),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))
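
A hedged sketch of how an MMDetection config such as the one above is typically consumed via the mmdet 2.x inference API; the checkpoint path is a hypothetical placeholder, not part of this config:

from mmdet.apis import init_detector, inference_detector

config_file = 'configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py'
checkpoint_file = 'checkpoints/faster_rcnn_hrnetv2p_w32_1x_coco.pth'  # hypothetical path

# build the HRNet-backboned Faster R-CNN described by the config
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class arrays of detected boxes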
spaces/Andy1621/uniformer_image_detection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py DELETED
@@ -1,4 +0,0 @@
_base_ = './cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(stem_channels=128, depth=101))
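
The four-line config above works through MMCV's `_base_` inheritance: the child file is merged over its parent and overrides only the keys it names. A minimal sketch of inspecting the merged result, assuming mmcv 1.x and paths relative to the repo root:

from mmcv import Config

cfg = Config.fromfile(
    'configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
)
print(cfg.model.backbone.depth)          # 101, overridden by this file
print(cfg.model.backbone.stem_channels)  # 128, overridden by this file
print(cfg.model.neck.type)               # inherited unchanged from the _base_ chain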
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/gfl_head.py DELETED
@@ -1,647 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32

from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
                        build_assigner, build_sampler, distance2bbox,
                        images_to_levels, multi_apply, multiclass_nms,
                        reduce_mean, unmap)
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead


class Integral(nn.Module):
    """A fixed layer for calculating integral result from distribution.

    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
    where P(y_i) denotes the softmax vector that represents the discrete
    distribution and y_i denotes the discrete set, usually
    {0, 1, 2, ..., reg_max}.

    Args:
        reg_max (int): The maximal value of the discrete set. Default: 16. You
            may want to reset it according to your new dataset or related
            settings.
    """

    def __init__(self, reg_max=16):
        super(Integral, self).__init__()
        self.reg_max = reg_max
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max, self.reg_max + 1))

    def forward(self, x):
        """Forward feature from the regression head to get integral result of
        bounding box location.

        Args:
            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
                n is self.reg_max.

        Returns:
            x (Tensor): Integral result of box locations, i.e., distance
                offsets from the box center in four directions, shape (N, 4).
        """
        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
        return x


@HEADS.register_module()
class GFLHead(AnchorHead):
    """Generalized Focal Loss: Learning Qualified and Distributed Bounding
    Boxes for Dense Object Detection.

    The GFL head structure is similar to ATSS, however GFL uses
    1) a joint representation for classification and localization quality, and
    2) a flexible General distribution for bounding box locations,
    which are supervised by
    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively.

    https://arxiv.org/abs/2006.04388

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Default: 4.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='GN', num_groups=32, requires_grad=True).
        loss_dfl (dict): Config of Distribution Focal Loss (DFL).
        reg_max (int): Max value of integral set :math:`{0, ..., reg_max}`
            in DFL setting. Default: 16.
    Example:
        >>> self = GFLHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_quality_score, bbox_pred = self.forward(feats)
        >>> assert len(cls_quality_score) == len(self.scales)
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
                 reg_max=16,
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.reg_max = reg_max
        super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)

        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            # SSD sampling=False so use PseudoSampler
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)

        self.integral = Integral(self.reg_max)
        self.loss_dfl = build_loss(loss_dfl)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        assert self.num_anchors == 1, 'anchor free version'
        self.gfl_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.gfl_reg = nn.Conv2d(
            self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
        self.scales = nn.ModuleList(
            [Scale(1.0) for _ in self.anchor_generator.strides])

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
        normal_init(self.gfl_reg, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox prediction
                cls_scores (list[Tensor]): Classification and quality (IoU)
                    joint scores for all scale levels, each is a 4D-tensor,
                    the channel number is num_classes.
                bbox_preds (list[Tensor]): Box distribution logits for all
                    scale levels, each is a 4D-tensor, the channel number is
                    4*(n+1), n is max value of integral set.
        """
        return multi_apply(self.forward_single, feats, self.scales)

    def forward_single(self, x, scale):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.

        Returns:
            tuple:
                cls_score (Tensor): Cls and quality joint scores for a single
                    scale level, the channel number is num_classes.
                bbox_pred (Tensor): Box distribution logits for a single scale
                    level, the channel number is 4*(n+1), n is max value of
                    integral set.
        """
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.gfl_cls(cls_feat)
        bbox_pred = scale(self.gfl_reg(reg_feat)).float()
        return cls_score, bbox_pred

    def anchor_center(self, anchors):
        """Get anchor centers from anchors.

        Args:
            anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.

        Returns:
            Tensor: Anchor centers with shape (N, 2), "xy" format.
        """
        anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
        anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
        return torch.stack([anchors_cx, anchors_cy], dim=-1)

    def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
                    bbox_targets, stride, num_total_samples):
        """Compute loss of a single scale level.

        Args:
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            cls_score (Tensor): Cls and quality joint scores for each scale
                level has shape (N, num_classes, H, W).
            bbox_pred (Tensor): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of integral
                set.
            labels (Tensor): Labels of each anchors with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor with
                shape (N, num_total_anchors, 4).
            stride (tuple): Stride in this scale level.
            num_total_samples (int): Number of positive samples that is
                reduced over all GPUs.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert stride[0] == stride[1], 'h stride is not equal to w stride!'
        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        bbox_pred = bbox_pred.permute(0, 2, 3,
                                      1).reshape(-1, 4 * (self.reg_max + 1))
        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        score = label_weights.new_zeros(labels.shape)

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]
            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]

            weight_targets = cls_score.detach().sigmoid()
            weight_targets = weight_targets.max(dim=1)[0][pos_inds]
            pos_bbox_pred_corners = self.integral(pos_bbox_pred)
            pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
                                                 pos_bbox_pred_corners)
            pos_decode_bbox_targets = pos_bbox_targets / stride[0]
            score[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(),
                pos_decode_bbox_targets,
                is_aligned=True)
            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
            target_corners = bbox2distance(pos_anchor_centers,
                                           pos_decode_bbox_targets,
                                           self.reg_max).reshape(-1)

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=weight_targets,
                avg_factor=1.0)

            # dfl loss
            loss_dfl = self.loss_dfl(
                pred_corners,
                target_corners,
                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
                avg_factor=4.0)
        else:
            loss_bbox = bbox_pred.sum() * 0
            loss_dfl = bbox_pred.sum() * 0
            weight_targets = bbox_pred.new_tensor(0)

        # cls (qfl) loss
        loss_cls = self.loss_cls(
            cls_score, (labels, score),
            weight=label_weights,
            avg_factor=num_total_samples)

        return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Cls and quality scores for each scale
                level has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of integral
                set.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        if cls_reg_targets is None:
            return None

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets

        num_total_samples = reduce_mean(
            torch.tensor(num_total_pos, dtype=torch.float,
                         device=device)).item()
        num_total_samples = max(num_total_samples, 1.0)

        losses_cls, losses_bbox, losses_dfl, \
            avg_factor = multi_apply(
                self.loss_single,
                anchor_list,
                cls_scores,
                bbox_preds,
                labels_list,
                label_weights_list,
                bbox_targets_list,
                self.anchor_generator.strides,
                num_total_samples=num_total_samples)

        avg_factor = sum(avg_factor)
        avg_factor = reduce_mean(avg_factor).item()
        losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
        losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)

    def _get_bboxes(self,
                    cls_scores,
                    bbox_preds,
                    mlvl_anchors,
                    img_shapes,
                    scale_factors,
                    cfg,
                    rescale=False,
                    with_nms=True):
        """Transform outputs for a single batch item into labeled boxes.

        Args:
            cls_scores (list[Tensor]): Box scores for a single scale level
                has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for a single
                scale level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            mlvl_anchors (list[Tensor]): Box reference for a single scale level
                with shape (num_total_anchors, 4).
            img_shapes (list[tuple[int]]): Shape of the input image,
                list[(height, width, 3)].
            scale_factors (list[ndarray]): Scale factor of the image arranged
                as (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where 5 represent
                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
                The shape of the second tensor in the tuple is (n,), and
                each element represents the class label of the corresponding
                box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
        batch_size = cls_scores[0].shape[0]

        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, stride, anchors in zip(
                cls_scores, bbox_preds, self.anchor_generator.strides,
                mlvl_anchors):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            assert stride[0] == stride[1]
            scores = cls_score.permute(0, 2, 3, 1).reshape(
                batch_size, -1, self.cls_out_channels).sigmoid()
            bbox_pred = bbox_pred.permute(0, 2, 3, 1)

            bbox_pred = self.integral(bbox_pred) * stride[0]
            bbox_pred = bbox_pred.reshape(batch_size, -1, 4)

            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[1] > nms_pre:
                max_scores, _ = scores.max(-1)
                _, topk_inds = max_scores.topk(nms_pre)
                batch_inds = torch.arange(batch_size).view(
                    -1, 1).expand_as(topk_inds).long()
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[batch_inds, topk_inds, :]
                scores = scores[batch_inds, topk_inds, :]
            else:
                anchors = anchors.expand_as(bbox_pred)

            bboxes = distance2bbox(
                self.anchor_center(anchors), bbox_pred, max_shape=img_shapes)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)

        batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
        if rescale:
            batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
                scale_factors).unsqueeze(1)

        batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
        # Add a dummy background class to the backend when using sigmoid
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        padding = batch_mlvl_scores.new_zeros(batch_size,
                                              batch_mlvl_scores.shape[1], 1)
        batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)

        if with_nms:
            det_results = []
            for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
                                                  batch_mlvl_scores):
                det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                     cfg.score_thr, cfg.nms,
                                                     cfg.max_per_img)
                det_results.append(tuple([det_bbox, det_label]))
        else:
            det_results = [
                tuple(mlvl_bs)
                for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
            ]
        return det_results

    def get_targets(self,
                    anchor_list,
                    valid_flag_list,
                    gt_bboxes_list,
                    img_metas,
                    gt_bboxes_ignore_list=None,
                    gt_labels_list=None,
                    label_channels=1,
                    unmap_outputs=True):
        """Get targets for GFL head.

        This method is almost the same as `AnchorHead.get_targets()`. Besides
        returning the targets as the parent method does, it also returns the
        anchors as the first element of the returned tuple.
        """
        num_imgs = len(img_metas)
        assert len(anchor_list) == len(valid_flag_list) == num_imgs

        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        num_level_anchors_list = [num_level_anchors] * num_imgs

        # concat all level anchors and flags to a single tensor
        for i in range(num_imgs):
            assert len(anchor_list[i]) == len(valid_flag_list[i])
            anchor_list[i] = torch.cat(anchor_list[i])
            valid_flag_list[i] = torch.cat(valid_flag_list[i])

        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        if gt_labels_list is None:
            gt_labels_list = [None for _ in range(num_imgs)]
        (all_anchors, all_labels, all_label_weights, all_bbox_targets,
         all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
             self._get_target_single,
             anchor_list,
             valid_flag_list,
             num_level_anchors_list,
             gt_bboxes_list,
             gt_bboxes_ignore_list,
             gt_labels_list,
             img_metas,
             label_channels=label_channels,
             unmap_outputs=unmap_outputs)
        # no valid anchors
        if any([labels is None for labels in all_labels]):
            return None
        # sampled anchors of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        # split targets to a list w.r.t. multiple levels
        anchors_list = images_to_levels(all_anchors, num_level_anchors)
        labels_list = images_to_levels(all_labels, num_level_anchors)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_anchors)
        bbox_targets_list = images_to_levels(all_bbox_targets,
                                             num_level_anchors)
        bbox_weights_list = images_to_levels(all_bbox_weights,
                                             num_level_anchors)
        return (anchors_list, labels_list, label_weights_list,
                bbox_targets_list, bbox_weights_list, num_total_pos,
                num_total_neg)

    def _get_target_single(self,
                           flat_anchors,
                           valid_flags,
                           num_level_anchors,
                           gt_bboxes,
                           gt_bboxes_ignore,
                           gt_labels,
                           img_meta,
                           label_channels=1,
                           unmap_outputs=True):
        """Compute regression, classification targets for anchors in a single
        image.

        Args:
            flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4)
            valid_flags (Tensor): Multi level valid flags of the image,
                which are concatenated into a single tensor of
                shape (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale level.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple: N is the number of total anchors in the image.
                anchors (Tensor): All anchors in the image with shape (N, 4).
                labels (Tensor): Labels of all anchors in the image with shape
                    (N,).
                label_weights (Tensor): Label weights of all anchor in the
                    image with shape (N,).
                bbox_targets (Tensor): BBox targets of all anchors in the
                    image with shape (N, 4).
                bbox_weights (Tensor): BBox weights of all anchors in the
                    image with shape (N, 4).
                pos_inds (Tensor): Indices of positive anchor with shape
                    (num_pos,).
                neg_inds (Tensor): Indices of negative anchor with shape
                    (num_neg,).
        """
        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
                                           img_meta['img_shape'][:2],
                                           self.train_cfg.allowed_border)
        if not inside_flags.any():
            return (None, ) * 7
        # assign gt and sample anchors
        anchors = flat_anchors[inside_flags, :]

        num_level_anchors_inside = self.get_num_level_anchors_inside(
            num_level_anchors, inside_flags)
        assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
                                             gt_bboxes, gt_bboxes_ignore,
                                             gt_labels)

        sampling_result = self.sampler.sample(assign_result, anchors,
                                              gt_bboxes)

        num_valid_anchors = anchors.shape[0]
        bbox_targets = torch.zeros_like(anchors)
        bbox_weights = torch.zeros_like(anchors)
        labels = anchors.new_full((num_valid_anchors, ),
                                  self.num_classes,
                                  dtype=torch.long)
        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)

        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            pos_bbox_targets = sampling_result.pos_gt_bboxes
            bbox_targets[pos_inds, :] = pos_bbox_targets
            bbox_weights[pos_inds, :] = 1.0
            if gt_labels is None:
                # Only rpn gives gt_labels as None
                # Foreground is the first class
                labels[pos_inds] = 0
            else:
                labels[pos_inds] = gt_labels[
                    sampling_result.pos_assigned_gt_inds]
            if self.train_cfg.pos_weight <= 0:
                label_weights[pos_inds] = 1.0
            else:
                label_weights[pos_inds] = self.train_cfg.pos_weight
        if len(neg_inds) > 0:
            label_weights[neg_inds] = 1.0

        # map up to original set of anchors
        if unmap_outputs:
            num_total_anchors = flat_anchors.size(0)
            anchors = unmap(anchors, num_total_anchors, inside_flags)
            labels = unmap(
                labels, num_total_anchors, inside_flags, fill=self.num_classes)
            label_weights = unmap(label_weights, num_total_anchors,
                                  inside_flags)
            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)

        return (anchors, labels, label_weights, bbox_targets, bbox_weights,
                pos_inds, neg_inds)

    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
        split_inside_flags = torch.split(inside_flags, num_level_anchors)
        num_level_anchors_inside = [
            int(flags.sum()) for flags in split_inside_flags
        ]
        return num_level_anchors_inside
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/image_editor.py DELETED
@@ -1,542 +0,0 @@
import os
from pathlib import Path
from optimization.constants import ASSETS_DIR_NAME, RANKED_RESULTS_DIR

from utils.metrics_accumulator import MetricsAccumulator
from utils.video import save_video
from utils.fft_pytorch import HighFrequencyLoss

from numpy import random
from optimization.augmentations import ImageAugmentations

from PIL import Image
import torch
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as F
from torchvision.transforms import functional as TF
from torch.nn.functional import mse_loss
from optimization.losses import range_loss, d_clip_loss
import lpips
import numpy as np

from CLIP import clip
from guided_diffusion.guided_diffusion.script_util import (
    create_model_and_diffusion,
    model_and_diffusion_defaults,
    create_classifier,
    classifier_defaults,
)
from utils.visualization import show_tensor_image, show_editied_masked_image
from utils.change_place import change_place, find_bbox

import pdb
import cv2


def create_classifier_ours():
    model = torchvision.models.resnet50()
    ckpt = torch.load('checkpoints/DRA_resnet50.pth')['model_state_dict']
    model.load_state_dict({k.replace('module.', '').replace('last_linear', 'fc'): v for k, v in ckpt.items()})
    model = torch.nn.Sequential(*[torch.nn.Upsample(size=(256, 256)), model])
    return model


class ImageEditor:
    def __init__(self, args) -> None:
        self.args = args
        os.makedirs(self.args.output_path, exist_ok=True)

        self.ranked_results_path = Path(os.path.join(self.args.output_path, RANKED_RESULTS_DIR))
        os.makedirs(self.ranked_results_path, exist_ok=True)

        if self.args.export_assets:
            self.assets_path = Path(os.path.join(self.args.output_path, ASSETS_DIR_NAME))
            os.makedirs(self.assets_path, exist_ok=True)
        if self.args.seed is not None:
            torch.manual_seed(self.args.seed)
            np.random.seed(self.args.seed)
            random.seed(self.args.seed)

        self.model_config = model_and_diffusion_defaults()
        self.model_config.update(
            {
                "attention_resolutions": "32, 16, 8",
                "class_cond": self.args.model_output_size == 512,
                "diffusion_steps": 1000,
                "rescale_timesteps": True,
                "timestep_respacing": self.args.timestep_respacing,
                "image_size": self.args.model_output_size,
                "learn_sigma": True,
                "noise_schedule": "linear",
                "num_channels": 256,
                "num_head_channels": 64,
                "num_res_blocks": 2,
                "resblock_updown": True,
                "use_fp16": True,
                "use_scale_shift_norm": True,
            }
        )

        self.classifier_config = classifier_defaults()
        self.classifier_config.update(
            {
                "image_size": self.args.model_output_size,
            }
        )

        # Load models
        self.device = torch.device(
            f"cuda:{self.args.gpu_id}" if torch.cuda.is_available() else "cpu"
        )
        print("Using device:", self.device)

        self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
        self.model.load_state_dict(
            torch.load(
                "checkpoints/256x256_diffusion_uncond.pt"
                if self.args.model_output_size == 256
                else "checkpoints/512x512_diffusion.pt",
                map_location="cpu",
            )
        )
        # self.model.requires_grad_(False).eval().to(self.device)
        self.model.eval().to(self.device)
        for name, param in self.model.named_parameters():
            if "qkv" in name or "norm" in name or "proj" in name:
                param.requires_grad_()
        if self.model_config["use_fp16"]:
            self.model.convert_to_fp16()

        self.classifier = create_classifier(**self.classifier_config)
        self.classifier.load_state_dict(
            torch.load("checkpoints/256x256_classifier.pt", map_location="cpu")
        )
        # self.classifier.requires_grad_(False).eval().to(self.device)

        # self.classifier = create_classifier_ours()

        self.classifier.eval().to(self.device)
        if self.classifier_config["classifier_use_fp16"]:
            self.classifier.convert_to_fp16()

        self.clip_model = (
            clip.load("ViT-B/16", device=self.device, jit=False)[0].eval().requires_grad_(False)
        )
        self.clip_size = self.clip_model.visual.input_resolution
        self.clip_normalize = transforms.Normalize(
            mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]
        )
        self.to_tensor = transforms.ToTensor()
        self.lpips_model = lpips.LPIPS(net="vgg").to(self.device)

        self.image_augmentations = ImageAugmentations(self.clip_size, self.args.aug_num)
        self.metrics_accumulator = MetricsAccumulator()

        self.hf_loss = HighFrequencyLoss()

    def unscale_timestep(self, t):
        unscaled_timestep = (t * (self.diffusion.num_timesteps / 1000)).long()

        return unscaled_timestep

    def clip_loss(self, x_in, text_embed):
        clip_loss = torch.tensor(0)

        if self.mask is not None:
            masked_input = x_in * self.mask
        else:
            masked_input = x_in
        augmented_input = self.image_augmentations(masked_input).add(1).div(2)  # shape: [N,C,H,W], range: [0,1]
        clip_in = self.clip_normalize(augmented_input)
        # pdb.set_trace()
        image_embeds = self.clip_model.encode_image(clip_in).float()
        dists = d_clip_loss(image_embeds, text_embed)

        # We want to sum over the averages
        for i in range(self.args.batch_size):
            # We want to average at the "augmentations level"
            clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()

        return clip_loss

    def unaugmented_clip_distance(self, x, text_embed):
        x = F.resize(x, [self.clip_size, self.clip_size])
        image_embeds = self.clip_model.encode_image(x).float()
        dists = d_clip_loss(image_embeds, text_embed)

        return dists.item()

    def model_fn(self, x, t, y=None):
        return self.model(x, t, y if self.args.class_cond else None)

    def edit_image_by_prompt(self):
        if self.args.image_guide:
            img_guidance = Image.open(self.args.prompt).convert('RGB')
            img_guidance = img_guidance.resize((224, 224), Image.LANCZOS)  # type: ignore
            img_guidance = self.clip_normalize(self.to_tensor(img_guidance).unsqueeze(0)).to(self.device)
            text_embed = self.clip_model.encode_image(img_guidance).float()
        else:
            text_embed = self.clip_model.encode_text(
                clip.tokenize(self.args.prompt).to(self.device)
            ).float()

        self.image_size = (self.model_config["image_size"], self.model_config["image_size"])
        self.init_image_pil = Image.open(self.args.init_image).convert("RGB")
        self.init_image_pil = self.init_image_pil.resize(self.image_size, Image.LANCZOS)  # type: ignore
        self.init_image = (
            TF.to_tensor(self.init_image_pil).to(self.device).unsqueeze(0).mul(2).sub(1)
        )
        self.init_image_pil_2 = Image.open(self.args.init_image_2).convert("RGB")
        if self.args.rotate_obj:
            # angle = random.randint(-45, 45)
            angle = self.args.angle
            self.init_image_pil_2 = self.init_image_pil_2.rotate(angle)
        self.init_image_pil_2 = self.init_image_pil_2.resize(self.image_size, Image.LANCZOS)  # type: ignore
        self.init_image_2 = (
            TF.to_tensor(self.init_image_pil_2).to(self.device).unsqueeze(0).mul(2).sub(1)
        )

        '''
        # Init with the inpainting image
        self.init_image_pil_ = Image.open('output/ImageNet-S_val/bad_case_RN50/ILSVRC2012_val_00013212/ranked/08480_output_i_0_b_0.png').convert("RGB")
        self.init_image_pil_ = self.init_image_pil_.resize(self.image_size, Image.LANCZOS)  # type: ignore
        self.init_image_ = (
            TF.to_tensor(self.init_image_pil_).to(self.device).unsqueeze(0).mul(2).sub(1)
        )
        '''

        if self.args.export_assets:
            img_path = self.assets_path / Path(self.args.output_file)
            self.init_image_pil.save(img_path, quality=100)

        self.mask = torch.ones_like(self.init_image, device=self.device)
        self.mask_pil = None
        if self.args.mask is not None:
            self.mask_pil = Image.open(self.args.mask).convert("RGB")
            if self.args.rotate_obj:
                self.mask_pil = self.mask_pil.rotate(angle)
            if self.mask_pil.size != self.image_size:
                self.mask_pil = self.mask_pil.resize(self.image_size, Image.NEAREST)  # type: ignore
            if self.args.random_position:
                bbox = find_bbox(np.array(self.mask_pil))
                print(bbox)

            image_mask_pil_binarized = ((np.array(self.mask_pil) > 0.5) * 255).astype(np.uint8)
            # image_mask_pil_binarized = cv2.dilate(image_mask_pil_binarized, np.ones((50, 50), np.uint8), iterations=1)
            if self.args.invert_mask:
                image_mask_pil_binarized = 255 - image_mask_pil_binarized
            self.mask_pil = TF.to_pil_image(image_mask_pil_binarized)
            self.mask = TF.to_tensor(Image.fromarray(image_mask_pil_binarized))
            self.mask = self.mask[0, ...].unsqueeze(0).unsqueeze(0).to(self.device)
            # self.mask[:] = 1

        if self.args.random_position:
            # print(self.init_image_2.shape, self.init_image_2.max(), self.init_image_2.min())
            # print(self.mask.shape, self.mask.max(), self.mask.min())
            # cv2.imwrite('tmp/init_before.jpg', np.transpose(((self.init_image_2 + 1) / 2 * 255).cpu().numpy()[0], (1, 2, 0))[:, :, ::-1])
            # cv2.imwrite('tmp/mask_before.jpg', (self.mask * 255).cpu().numpy()[0][0])
            self.init_image_2, self.mask = change_place(self.init_image_2, self.mask, bbox, self.args.invert_mask)
            # cv2.imwrite('tmp/init_after.jpg', np.transpose(((self.init_image_2 + 1) / 2 * 255).cpu().numpy()[0], (1, 2, 0))[:, :, ::-1])
            # cv2.imwrite('tmp/mask_after.jpg', (self.mask * 255).cpu().numpy()[0][0])

        if self.args.export_assets:
            mask_path = self.assets_path / Path(
                self.args.output_file.replace(".png", "_mask.png")
            )
            self.mask_pil.save(mask_path, quality=100)

        def class_guided(x, y, t):
            assert y is not None
            with torch.enable_grad():
                x_in = x.detach().requires_grad_(True)
                # logits = self.classifier(x_in, t)
                logits = self.classifier(x_in)
                log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
                selected = log_probs[range(len(logits)), y.view(-1)]
                loss = selected.sum()

                return -torch.autograd.grad(loss, x_in)[0] * self.args.classifier_scale

        def cond_fn(x, t, y=None):
            if self.args.prompt == "":
                return torch.zeros_like(x)
            # pdb.set_trace()
            with torch.enable_grad():
                x = x.detach().requires_grad_()

                t_unscale = self.unscale_timestep(t)

                '''
                out = self.diffusion.p_mean_variance(
                    self.model, x, t, clip_denoised=False, model_kwargs={"y": y}
                )
                '''
                out = self.diffusion.p_mean_variance(
                    self.model, x, t_unscale, clip_denoised=False, model_kwargs={"y": None}
                )

                fac = self.diffusion.sqrt_one_minus_alphas_cumprod[t_unscale[0].item()]
                # x_in = out["pred_xstart"] * fac + x * (1 - fac)
                x_in = out["pred_xstart"]  # Revised by XX, 2022.07.14

                loss = torch.tensor(0)
                if self.args.classifier_scale != 0 and y is not None:
                    # gradient_class_guided = class_guided(x, y, t)
                    gradient_class_guided = class_guided(x_in, y, t)

                if self.args.background_complex != 0:
                    if self.args.hard:
                        loss = loss - self.args.background_complex * self.hf_loss((x_in + 1.) / 2.)
                    else:
                        loss = loss + self.args.background_complex * self.hf_loss((x_in + 1.) / 2.)

                if self.args.clip_guidance_lambda != 0:
                    clip_loss = self.clip_loss(x_in, text_embed) * self.args.clip_guidance_lambda
                    loss = loss + clip_loss
                    self.metrics_accumulator.update_metric("clip_loss", clip_loss.item())

                if self.args.range_lambda != 0:
                    r_loss = range_loss(out["pred_xstart"]).sum() * self.args.range_lambda
                    loss = loss + r_loss
305
- self.metrics_accumulator.update_metric("range_loss", r_loss.item())
306
-
307
- if self.args.background_preservation_loss:
308
- x_in = out["pred_xstart"] * fac + x * (1 - fac)
309
- if self.mask is not None:
310
- # masked_background = x_in * (1 - self.mask)
311
- masked_background = x_in * self.mask # 2022.07.19
312
- else:
313
- masked_background = x_in
314
-
315
- if self.args.lpips_sim_lambda:
316
- '''
317
- loss = (
318
- loss
319
- + self.lpips_model(masked_background, self.init_image).sum()
320
- * self.args.lpips_sim_lambda
321
- )
322
- '''
323
- # 2022.07.19
324
- loss = (
325
- loss
326
- + self.lpips_model(masked_background, self.init_image*self.mask).sum()
327
- * self.args.lpips_sim_lambda
328
- )
329
- if self.args.l2_sim_lambda:
330
- '''
331
- loss = (
332
- loss
333
- + mse_loss(masked_background, self.init_image) * self.args.l2_sim_lambda
334
- )
335
- '''
336
- # 2022.07.19
337
- loss = (
338
- loss
339
- + mse_loss(masked_background, self.init_image*self.mask) * self.args.l2_sim_lambda
340
- )
341
-
342
-
343
- if self.args.classifier_scale != 0 and y is not None:
344
- return -torch.autograd.grad(loss, x)[0] + gradient_class_guided
345
- else:
346
- return -torch.autograd.grad(loss, x)[0]
347
-
348
- @torch.no_grad()
349
- def postprocess_fn(out, t):
350
- if self.args.coarse_to_fine:
351
- if t > 50:
352
- kernel = 51
353
- elif t > 35:
354
- kernel = 31
355
- else:
356
- kernel = 0
357
- if kernel > 0:
358
- max_pool = torch.nn.MaxPool2d(kernel_size=kernel, stride=1, padding=int((kernel-1)/2))
359
- self.mask_d = 1 - self.mask
360
- self.mask_d = max_pool(self.mask_d)
361
- self.mask_d = 1 - self.mask_d
362
- else:
363
- self.mask_d = self.mask
364
- else:
365
- self.mask_d = self.mask
366
-
367
- if self.mask is not None:
368
- background_stage_t = self.diffusion.q_sample(self.init_image_2, t[0])
369
- background_stage_t = torch.tile(
370
- background_stage_t, dims=(self.args.batch_size, 1, 1, 1)
371
- )
372
- out["sample"] = out["sample"] * self.mask_d + background_stage_t * (1 - self.mask_d)
373
-
374
- return out
375
-
376
- save_image_interval = self.diffusion.num_timesteps // 5
377
- for iteration_number in range(self.args.iterations_num):
378
- print(f"Start iterations {iteration_number}")
379
-
380
- sample_func = (
381
- self.diffusion.ddim_sample_loop_progressive
382
- if self.args.ddim
383
- else self.diffusion.p_sample_loop_progressive
384
- )
385
- samples = sample_func(
386
- self.model_fn,
387
- (
388
- self.args.batch_size,
389
- 3,
390
- self.model_config["image_size"],
391
- self.model_config["image_size"],
392
- ),
393
- clip_denoised=False,
394
- # model_kwargs={}
395
- # if self.args.model_output_size == 256
396
- # else {
397
- # "y": torch.zeros([self.args.batch_size], device=self.device, dtype=torch.long)
398
- # },
399
- model_kwargs={}
400
- if self.args.classifier_scale == 0
401
- else {"y": self.args.y*torch.ones([self.args.batch_size], device=self.device, dtype=torch.long)},
402
- cond_fn=cond_fn,
403
- device=self.device,
404
- progress=True,
405
- skip_timesteps=self.args.skip_timesteps,
406
- init_image=self.init_image,
407
- # init_image=self.init_image_,
408
- postprocess_fn=None if self.args.local_clip_guided_diffusion else postprocess_fn,
409
- randomize_class=True if self.args.classifier_scale == 0 else False,
410
- )
411
-
412
- intermediate_samples = [[] for i in range(self.args.batch_size)]
413
- total_steps = self.diffusion.num_timesteps - self.args.skip_timesteps - 1
414
- for j, sample in enumerate(samples):
415
- should_save_image = j % save_image_interval == 0 or j == total_steps
416
- if should_save_image or self.args.save_video:
417
- self.metrics_accumulator.print_average_metric()
418
-
419
- for b in range(self.args.batch_size):
420
- pred_image = sample["pred_xstart"][b]
421
- visualization_path = Path(
422
- os.path.join(self.args.output_path, self.args.output_file)
423
- )
424
- visualization_path = visualization_path.with_stem(
425
- f"{visualization_path.stem}_i_{iteration_number}_b_{b}"
426
- )
427
- if (
428
- self.mask is not None
429
- and self.args.enforce_background
430
- and j == total_steps
431
- and not self.args.local_clip_guided_diffusion
432
- ):
433
- pred_image = (
434
- self.init_image_2[0] * (1 - self.mask[0]) + pred_image * self.mask[0]
435
- )
436
- '''
437
- if j == total_steps:
438
- pdb.set_trace()
439
- pred_image = (
440
- self.init_image_2[0] * (1 - self.mask[0]) + pred_image * self.mask[0]
441
- )
442
- '''
443
- pred_image = pred_image.add(1).div(2).clamp(0, 1)
444
- pred_image_pil = TF.to_pil_image(pred_image)
445
- masked_pred_image = self.mask * pred_image.unsqueeze(0)
446
- final_distance = self.unaugmented_clip_distance(
447
- masked_pred_image, text_embed
448
- )
449
- formatted_distance = f"{final_distance:.4f}"
450
-
451
- if self.args.export_assets:
452
- pred_path = self.assets_path / visualization_path.name
453
- pred_image_pil.save(pred_path, quality=100)
454
-
455
- if j == total_steps:
456
- path_friendly_distance = formatted_distance.replace(".", "")
457
- ranked_pred_path = self.ranked_results_path / (
458
- path_friendly_distance + "_" + visualization_path.name
459
- )
460
- pred_image_pil.save(ranked_pred_path, quality=100)
461
-
462
- intermediate_samples[b].append(pred_image_pil)
463
- if should_save_image:
464
- show_editied_masked_image(
465
- title=self.args.prompt,
466
- source_image=self.init_image_pil,
467
- edited_image=pred_image_pil,
468
- mask=self.mask_pil,
469
- path=visualization_path,
470
- distance=formatted_distance,
471
- )
472
-
473
- if self.args.save_video:
474
- for b in range(self.args.batch_size):
475
- video_name = self.args.output_file.replace(
476
- ".png", f"_i_{iteration_number}_b_{b}.avi"
477
- )
478
- video_path = os.path.join(self.args.output_path, video_name)
479
- save_video(intermediate_samples[b], video_path)
480
-
481
- visualize_size = (256,256)
482
- img_ori = cv2.imread(self.args.init_image_2)
483
- img_ori = cv2.resize(img_ori, visualize_size)
484
- mask = cv2.imread(self.args.mask)
485
- mask = cv2.resize(mask, visualize_size)
486
- imgs = [img_ori, mask]
487
- for ii, img_name in enumerate(os.listdir(os.path.join(self.args.output_path, 'ranked'))):
488
- img_path = os.path.join(self.args.output_path, 'ranked', img_name)
489
- img = cv2.imread(img_path)
490
- img = cv2.resize(img, visualize_size)
491
- imgs.append(img)
492
- if ii >= 7:
493
- break
494
-
495
- img_whole = cv2.hconcat(imgs[2:])
496
- '''
497
- img_name = self.args.output_path.split('/')[-2]+'/'
498
- if self.args.coarse_to_fine:
499
- if self.args.clip_guidance_lambda == 0:
500
- prompt = 'coarse_to_fine_no_clip'
501
- else:
502
- prompt = 'coarse_to_fine'
503
- elif self.args.image_guide:
504
- prompt = 'image_guide'
505
- elif self.args.clip_guidance_lambda == 0:
506
- prompt = 'no_clip_guide'
507
- else:
508
- prompt = 'text_guide'
509
- '''
510
-
511
- cv2.imwrite(os.path.join(self.args.final_save_root, 'edited.png'), img_whole, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
512
-
513
-
514
- def reconstruct_image(self):
515
- init = Image.open(self.args.init_image).convert("RGB")
516
- init = init.resize(
517
- self.image_size, # type: ignore
518
- Image.LANCZOS,
519
- )
520
- init = TF.to_tensor(init).to(self.device).unsqueeze(0).mul(2).sub(1)
521
-
522
- samples = self.diffusion.p_sample_loop_progressive(
523
- self.model,
524
- (1, 3, self.model_config["image_size"], self.model_config["image_size"],),
525
- clip_denoised=False,
526
- model_kwargs={}
527
- if self.args.model_output_size == 256
528
- else {"y": torch.zeros([self.args.batch_size], device=self.device, dtype=torch.long)},
529
- cond_fn=None,
530
- progress=True,
531
- skip_timesteps=self.args.skip_timesteps,
532
- init_image=init,
533
- randomize_class=True,
534
- )
535
- save_image_interval = self.diffusion.num_timesteps // 5
536
- max_iterations = self.diffusion.num_timesteps - self.args.skip_timesteps - 1
537
-
538
- for j, sample in enumerate(samples):
539
- if j % save_image_interval == 0 or j == max_iterations:
540
- print()
541
- filename = os.path.join(self.args.output_path, self.args.output_file)
542
- TF.to_pil_image(sample["pred_xstart"][0].add(1).div(2).clamp(0, 1)).save(filename)
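A note on the augmentation-averaged CLIP loss in clip_loss above: the augmenter stacks every augmented copy of the batch along dim 0, so copy k of sample i lands at index k * batch_size + i, and the strided slice dists[i::batch_size] gathers all copies of sample i. A minimal, self-contained sketch of that indexing (illustrative only; per_sample_aug_mean is not a function from this repository):

    import torch

    def per_sample_aug_mean(dists: torch.Tensor, batch_size: int) -> torch.Tensor:
        # dists: flat tensor of length n_aug * batch_size; dists[i::batch_size]
        # selects every augmentation of sample i.
        return torch.stack([dists[i::batch_size].mean() for i in range(batch_size)])

    # 3 augmentations of a batch of 2 -> 6 distances, reduced to 2 per-sample means:
    print(per_sample_aug_mean(torch.arange(6.0), batch_size=2))  # tensor([2., 3.])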
spaces/Arnx/MusicGenXvAKN/audiocraft/data/zip.py DELETED
@@ -1,74 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import typing
- import zipfile
-
- from dataclasses import dataclass
- from functools import lru_cache
- from typing_extensions import Literal
-
-
- DEFAULT_SIZE = 32
- MODE = Literal['r', 'w', 'x', 'a']
-
-
- @dataclass(order=True)
- class PathInZip:
-     """Class for holding a path of file within a zip file.
-
-     Args:
-         path: The convention is <path_to_zip>:<relative_path_inside_zip>
-             Let's assume there is a zip file /some/location/foo.zip
-             and inside of it is a json file located at /data/file1.json,
-             Then we expect path = "/some/location/foo.zip:/data/file1.json"
-     """
-
-     INFO_PATH_SEP = ':'
-     zip_path: str
-     file_path: str
-
-     def __init__(self, path: str) -> None:
-         split_path = path.split(self.INFO_PATH_SEP)
-         assert len(split_path) == 2
-         self.zip_path, self.file_path = split_path
-
-     @classmethod
-     def from_paths(cls, zip_path: str, file_path: str):
-         return cls(zip_path + cls.INFO_PATH_SEP + file_path)
-
-     def __str__(self) -> str:
-         return self.zip_path + self.INFO_PATH_SEP + self.file_path
-
-
- def _open_zip(path: str, mode: MODE = 'r'):
-     return zipfile.ZipFile(path, mode)
-
-
- _cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip)
-
-
- def set_zip_cache_size(max_size: int):
-     """Sets the maximal LRU caching for zip file opening.
-
-     Args:
-         max_size: the maximal LRU cache.
-     """
-     global _cached_open_zip
-     _cached_open_zip = lru_cache(max_size)(_open_zip)
-
-
- def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO:
-     """Opens a file stored inside a zip and returns a file-like object.
-
-     Args:
-         path_in_zip: A PathInZip object representing the file to return a file-like object of.
-         mode: The mode in which to open the file with.
-     Returns:
-         A file-like object for PathInZip.
-     """
-     zf = _cached_open_zip(path_in_zip.zip_path)
-     return zf.open(path_in_zip.file_path)
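A short usage sketch for the helpers above; the archive path is hypothetical, and the import path is assumed to follow the file's location (audiocraft/data/zip.py):

    from audiocraft.data.zip import PathInZip, open_file_in_zip, set_zip_cache_size

    set_zip_cache_size(8)  # bound the LRU cache of open ZipFile handles
    p = PathInZip('/some/location/foo.zip:/data/file1.json')  # '<zip>:<member>'
    with open_file_in_zip(p) as f:  # reuses the cached ZipFile for foo.zip
        payload = f.read()

Note the design choice: the ZipFile handles stay open in the LRU cache, so repeated reads from the same archive avoid re-parsing the central directory.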
spaces/Astroomx/Mine/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Mine
- emoji: 🦀
- colorFrom: pink
- colorTo: green
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_envs.py DELETED
@@ -1,188 +0,0 @@
- import functools
- import importlib.metadata
- import logging
- import os
- import pathlib
- import sys
- import zipfile
- import zipimport
- from typing import Iterator, List, Optional, Sequence, Set, Tuple
-
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
- from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
- from pip._internal.models.wheel import Wheel
- from pip._internal.utils.deprecation import deprecated
- from pip._internal.utils.filetypes import WHEEL_EXTENSION
-
- from ._compat import BadMetadata, BasePath, get_dist_name, get_info_location
- from ._dists import Distribution
-
- logger = logging.getLogger(__name__)
-
-
- def _looks_like_wheel(location: str) -> bool:
-     if not location.endswith(WHEEL_EXTENSION):
-         return False
-     if not os.path.isfile(location):
-         return False
-     if not Wheel.wheel_file_re.match(os.path.basename(location)):
-         return False
-     return zipfile.is_zipfile(location)
-
-
- class _DistributionFinder:
-     """Finder to locate distributions.
-
-     The main purpose of this class is to memoize found distributions' names, so
-     only one distribution is returned for each package name. At lot of pip code
-     assumes this (because it is setuptools's behavior), and not doing the same
-     can potentially cause a distribution in lower precedence path to override a
-     higher precedence one if the caller is not careful.
-
-     Eventually we probably want to make it possible to see lower precedence
-     installations as well. It's useful feature, after all.
-     """
-
-     FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
-
-     def __init__(self) -> None:
-         self._found_names: Set[NormalizedName] = set()
-
-     def _find_impl(self, location: str) -> Iterator[FoundResult]:
-         """Find distributions in a location."""
-         # Skip looking inside a wheel. Since a package inside a wheel is not
-         # always valid (due to .data directories etc.), its .dist-info entry
-         # should not be considered an installed distribution.
-         if _looks_like_wheel(location):
-             return
-         # To know exactly where we find a distribution, we have to feed in the
-         # paths one by one, instead of dumping the list to importlib.metadata.
-         for dist in importlib.metadata.distributions(path=[location]):
-             info_location = get_info_location(dist)
-             try:
-                 raw_name = get_dist_name(dist)
-             except BadMetadata as e:
-                 logger.warning("Skipping %s due to %s", info_location, e.reason)
-                 continue
-             normalized_name = canonicalize_name(raw_name)
-             if normalized_name in self._found_names:
-                 continue
-             self._found_names.add(normalized_name)
-             yield dist, info_location
-
-     def find(self, location: str) -> Iterator[BaseDistribution]:
-         """Find distributions in a location.
-
-         The path can be either a directory, or a ZIP archive.
-         """
-         for dist, info_location in self._find_impl(location):
-             if info_location is None:
-                 installed_location: Optional[BasePath] = None
-             else:
-                 installed_location = info_location.parent
-             yield Distribution(dist, info_location, installed_location)
-
-     def find_linked(self, location: str) -> Iterator[BaseDistribution]:
-         """Read location in egg-link files and return distributions in there.
-
-         The path should be a directory; otherwise this returns nothing. This
-         follows how setuptools does this for compatibility. The first non-empty
-         line in the egg-link is read as a path (resolved against the egg-link's
-         containing directory if relative). Distributions found at that linked
-         location are returned.
-         """
-         path = pathlib.Path(location)
-         if not path.is_dir():
-             return
-         for child in path.iterdir():
-             if child.suffix != ".egg-link":
-                 continue
-             with child.open() as f:
-                 lines = (line.strip() for line in f)
-                 target_rel = next((line for line in lines if line), "")
-             if not target_rel:
-                 continue
-             target_location = str(path.joinpath(target_rel))
-             for dist, info_location in self._find_impl(target_location):
-                 yield Distribution(dist, info_location, path)
-
-     def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
-         from pip._vendor.pkg_resources import find_distributions
-
-         from pip._internal.metadata import pkg_resources as legacy
-
-         with os.scandir(location) as it:
-             for entry in it:
-                 if not entry.name.endswith(".egg"):
-                     continue
-                 for dist in find_distributions(entry.path):
-                     yield legacy.Distribution(dist)
-
-     def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
-         from pip._vendor.pkg_resources import find_eggs_in_zip
-
-         from pip._internal.metadata import pkg_resources as legacy
-
-         try:
-             importer = zipimport.zipimporter(location)
-         except zipimport.ZipImportError:
-             return
-         for dist in find_eggs_in_zip(importer, location):
-             yield legacy.Distribution(dist)
-
-     def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
-         """Find eggs in a location.
-
-         This actually uses the old *pkg_resources* backend. We likely want to
-         deprecate this so we can eventually remove the *pkg_resources*
-         dependency entirely. Before that, this should first emit a deprecation
-         warning for some versions when using the fallback since importing
-         *pkg_resources* is slow for those who don't need it.
-         """
-         if os.path.isdir(location):
-             yield from self._find_eggs_in_dir(location)
-         if zipfile.is_zipfile(location):
-             yield from self._find_eggs_in_zip(location)
-
-
- @functools.lru_cache(maxsize=None)  # Warn a distribution exactly once.
- def _emit_egg_deprecation(location: Optional[str]) -> None:
-     deprecated(
-         reason=f"Loading egg at {location} is deprecated.",
-         replacement="to use pip for package installation.",
-         gone_in=None,
-     )
-
-
- class Environment(BaseEnvironment):
-     def __init__(self, paths: Sequence[str]) -> None:
-         self._paths = paths
-
-     @classmethod
-     def default(cls) -> BaseEnvironment:
-         return cls(sys.path)
-
-     @classmethod
-     def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
-         if paths is None:
-             return cls(sys.path)
-         return cls(paths)
-
-     def _iter_distributions(self) -> Iterator[BaseDistribution]:
-         finder = _DistributionFinder()
-         for location in self._paths:
-             yield from finder.find(location)
-             for dist in finder.find_eggs(location):
-                 # _emit_egg_deprecation(dist.location)  # TODO: Enable this.
-                 yield dist
-             # This must go last because that's how pkg_resources tie-breaks.
-             yield from finder.find_linked(location)
-
-     def get_distribution(self, name: str) -> Optional[BaseDistribution]:
-         matches = (
-             distribution
-             for distribution in self.iter_all_distributions()
-             if distribution.canonical_name == canonicalize_name(name)
-         )
-         return next(matches, None)
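The first-wins memoization in _DistributionFinder._find_impl above is what keeps an earlier search-path entry from being shadowed by a later one. A standalone toy sketch of that precedence rule (the name normalization here is a rough stand-in for canonicalize_name, not pip's API):

    def first_wins(found):
        seen, kept = set(), []
        for raw_name, location in found:
            name = raw_name.lower().replace('_', '-')  # crude canonicalization
            if name in seen:
                continue  # later (lower-precedence) locations cannot override
            seen.add(name)
            kept.append((name, location))
        return kept

    print(first_wins([('Requests', '/venv/site-packages'),
                      ('requests', '/usr/lib/python3/dist-packages')]))
    # [('requests', '/venv/site-packages')]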
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py DELETED
@@ -1,83 +0,0 @@
- """
-     pygments.formatters.pangomarkup
-     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-     Formatter for Pango markup output.
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
- from pip._vendor.pygments.formatter import Formatter
-
-
- __all__ = ['PangoMarkupFormatter']
-
-
- _escape_table = {
-     ord('&'): '&amp;',
-     ord('<'): '&lt;',
- }
-
-
- def escape_special_chars(text, table=_escape_table):
-     """Escape & and < for Pango Markup."""
-     return text.translate(table)
-
-
- class PangoMarkupFormatter(Formatter):
-     """
-     Format tokens as Pango Markup code. It can then be rendered to an SVG.
-
-     .. versionadded:: 2.9
-     """
-
-     name = 'Pango Markup'
-     aliases = ['pango', 'pangomarkup']
-     filenames = []
-
-     def __init__(self, **options):
-         Formatter.__init__(self, **options)
-
-         self.styles = {}
-
-         for token, style in self.style:
-             start = ''
-             end = ''
-             if style['color']:
-                 start += '<span fgcolor="#%s">' % style['color']
-                 end = '</span>' + end
-             if style['bold']:
-                 start += '<b>'
-                 end = '</b>' + end
-             if style['italic']:
-                 start += '<i>'
-                 end = '</i>' + end
-             if style['underline']:
-                 start += '<u>'
-                 end = '</u>' + end
-             self.styles[token] = (start, end)
-
-     def format_unencoded(self, tokensource, outfile):
-         lastval = ''
-         lasttype = None
-
-         outfile.write('<tt>')
-
-         for ttype, value in tokensource:
-             while ttype not in self.styles:
-                 ttype = ttype.parent
-             if ttype == lasttype:
-                 lastval += escape_special_chars(value)
-             else:
-                 if lastval:
-                     stylebegin, styleend = self.styles[lasttype]
-                     outfile.write(stylebegin + lastval + styleend)
-                 lastval = escape_special_chars(value)
-                 lasttype = ttype
-
-         if lastval:
-             stylebegin, styleend = self.styles[lasttype]
-             outfile.write(stylebegin + lastval + styleend)
-
-         outfile.write('</tt>')
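A minimal way to exercise this formatter, assuming a standalone pygments install (>= 2.9, where PangoMarkupFormatter was added) rather than pip's vendored copy:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import PangoMarkupFormatter

    markup = highlight('print("hi")', PythonLexer(), PangoMarkupFormatter())
    print(markup)  # a <tt>...</tt> string ready for a Pango-aware renderer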
spaces/AutomationVR/ImageDemo/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
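For context: on more recent Gradio releases, Interface.load has been deprecated in favor of the module-level gr.load; a sketch of the equivalent one-liner (same model id as above, exact behavior may vary across Gradio versions):

    import gradio as gr

    gr.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()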
spaces/Bart92/RVC_HF/julius/utils.py DELETED
@@ -1,101 +0,0 @@
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
- # Author: adefossez, 2020
- """
- Non signal processing related utilities.
- """
-
- import inspect
- import typing as tp
- import sys
- import time
-
-
- def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
-                 overrides: dict = {}):
-     """
-     Return a simple representation string for `obj`.
-     If `attrs` is not None, it should be a list of attributes to include.
-     """
-     params = inspect.signature(obj.__class__).parameters
-     attrs_repr = []
-     if attrs is None:
-         attrs = list(params.keys())
-     for attr in attrs:
-         display = False
-         if attr in overrides:
-             value = overrides[attr]
-         elif hasattr(obj, attr):
-             value = getattr(obj, attr)
-         else:
-             continue
-         if attr in params:
-             param = params[attr]
-             if param.default is inspect._empty or value != param.default:  # type: ignore
-                 display = True
-         else:
-             display = True
-
-         if display:
-             attrs_repr.append(f"{attr}={value}")
-     return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
-
-
- class MarkdownTable:
-     """
-     Simple MarkdownTable generator. The column titles should be large enough
-     for the lines content. This will right align everything.
-
-     >>> import io  # we use io purely for test purposes, default is sys.stdout.
-     >>> file = io.StringIO()
-     >>> table = MarkdownTable(["Item Name", "Price"], file=file)
-     >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"])
-     >>> print(file.getvalue().strip())  # Strip for test purposes
-     | Item Name | Price |
-     |-----------|-------|
-     |     Honey |     5 |
-     |       Car | 5,000 |
-     """
-     def __init__(self, columns, file=sys.stdout):
-         self.columns = columns
-         self.file = file
-
-     def _writeln(self, line):
-         self.file.write("|" + "|".join(line) + "|\n")
-
-     def header(self):
-         self._writeln(f" {col} " for col in self.columns)
-         self._writeln("-" * (len(col) + 2) for col in self.columns)
-
-     def line(self, line):
-         out = []
-         for val, col in zip(line, self.columns):
-             val = format(val, '>' + str(len(col)))
-             out.append(" " + val + " ")
-         self._writeln(out)
-
-
- class Chrono:
-     """
-     Measures ellapsed time, calling `torch.cuda.synchronize` if necessary.
-     `Chrono` instances can be used as context managers (e.g. with `with`).
-     Upon exit of the block, you can access the duration of the block in seconds
-     with the `duration` attribute.
-
-     >>> with Chrono() as chrono:
-     ...     _ = sum(range(10_000))
-     ...
-     >>> print(chrono.duration < 10)  # Should be true unless on a really slow computer.
-     True
-     """
-     def __init__(self):
-         self.duration = None
-
-     def __enter__(self):
-         self._begin = time.time()
-         return self
-
-     def __exit__(self, exc_type, exc_value, exc_tracebck):
-         import torch
-         if torch.cuda.is_available():
-             torch.cuda.synchronize()
-         self.duration = time.time() - self._begin
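simple_repr above only prints constructor arguments whose current value differs from the signature default. A quick illustration with a toy class (LowPass here is illustrative, not the real julius filter class):

    from julius.utils import simple_repr

    class LowPass:
        def __init__(self, cutoff, zeros=8):
            self.cutoff = cutoff
            self.zeros = zeros

        def __repr__(self):
            return simple_repr(self)

    print(LowPass(0.25))           # LowPass(cutoff=0.25)   -- default zeros hidden
    print(LowPass(0.25, zeros=4))  # LowPass(cutoff=0.25,zeros=4)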
spaces/Benson/text-generation/Examples/Ciudad Dragn Mvil Mod Apk Dinero Ilimitado Y Gemas 2022.md DELETED
@@ -1,35 +0,0 @@
- <br />
- <h1>Dragon City Mobile Mod APK Unlimited Money and Gems 2022</h1>
- <p>Do you love dragons? Do you want to build your own dragon city and rule the skies? Do you want unlimited resources and access to every feature of the game? If so, you are in the right place. In this article we tell you everything you need to know about Dragon City Mobile Mod APK, a modified version of the popular simulation game that lets you enjoy unlimited money and gems, unlimited dragons and islands, easy breeding and hatching, no ads, and no root required. Read on to learn more.</p>
- <h2>dragon city mobile mod apk unlimited money and gems 2022</h2><br /><p><b><b>Download File</b> > <a href="https://bltlly.com/2v6IUX">https://bltlly.com/2v6IUX</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is Dragon City Mobile?</h3>
- <p>Dragon City Mobile is a simulation game developed by Socialpoint in which you build your own dragon city on floating islands and fill it with farms, habitats, buildings, and dragons. You can collect more than 1,000 different dragons and breed them to create new ones. You can also train your dragons and have them fight in arenas against other players. You can join alliances, chat with other dragon masters, take part in events, and complete quests to earn rewards. Dragon City Mobile is a fun, addictive game that will keep you entertained for hours.</p>
- <h3>What is Dragon City Mobile Mod APK?</h3>
- <p>Dragon City Mobile Mod APK is a modified version of the original game that gives you access to unlimited money and gems, unlimited dragons and islands, easy breeding and hatching, no ads, and no root required. With this mod apk you can enjoy every feature of the game without limits or restrictions. You can buy whatever you want, unlock any dragon you want, expand your city as much as you want, breed and hatch any dragon you want, and play the game without interruptions or annoyances.</p>
- <h3>Why use Dragon City Mobile Mod APK?</h3>
- <p>There are many reasons to use Dragon City Mobile Mod APK instead of the original game. Here are some of them:</p>
- <ul>
- <li>You can have more fun and excitement by getting unlimited dragons and islands for free. You don't have to wait hours or days for your dragons to breed or hatch. You can get any dragon you want instantly. You can also expand your city as much as you like and decorate it with all kinds of buildings and objects.</li>
- <li>You can have more control and flexibility by getting easy breeding and hatching for free. You don't have to follow any rule or pattern to breed or hatch your dragons. You can mix any two dragons you want and get a new one. You can also speed up the process using gems.</li>
- <li>You can have a better play experience by getting no ads and no root required for free. You don't have to deal with annoying ads that pop up every few minutes and interrupt your game. You also don't have to root your device, or risk damaging it, to use the mod apk.</li>
- </ul>
- <h2>Features of Dragon City Mobile Mod APK</h2>
- <h3>Unlimited money and gems</h3>
- <p>The most important feature of Dragon City Mobile Mod APK is that it gives you unlimited money and gems for free. Money and gems are the main currencies in the game; you need them to buy items, unlock dragons, expand your city, speed up processes, and so on. With unlimited money and gems you can buy anything you want, enjoy every feature of the game without limits or restrictions, and play without interruptions or annoyances.</p>
-
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions about Dragon City Mobile Mod APK:</p>
- <p></p>
- <h3>Q: Is Dragon City Mobile Mod APK safe to use?</h3>
- <p>A: Yes, Dragon City Mobile Mod APK is safe to use as long as you download it from a trusted source. We tested the mod apk file on our devices and found no malware or viruses. However, we recommend scanning the file with an antivirus before installing it, just to be safe.</p>
- <h3>Q: Is Dragon City Mobile Mod APK compatible with my device?</h3>
- <p>A: Dragon City Mobile Mod APK is compatible with most Android devices running Android 4.4 or higher. However, some devices may not support the mod apk because of different specifications or settings. If you run into problems installing or playing the mod apk, try changing your device settings or contact the developer for help.</p>
- <h3>Q: Will I get banned for using Dragon City Mobile Mod APK?</h3>
- <p>A: No, you will not get banned for using Dragon City Mobile Mod APK, as the mod apk does not interfere with the game's servers or online features. You can play normally with other players without any risk of being kicked out. However, we advise you to use the mod apk responsibly and not abuse its features to gain an unfair advantage over other players.</p>
- <h3>Q: Can I update Dragon City Mobile Mod APK?</h3>
- <p>A: Yes, you can update Dragon City Mobile Mod APK whenever a new version is available. However, you will have to download and install the new mod apk file manually from the same source as before. You cannot update the mod apk through the Google Play Store or the game's official website.</p>
- <h3>Q: Can I use Dragon City Mobile Mod APK with my existing account?</h3> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Conseguir Sobre l Descarga Gratuita Para Pc Ventanas 7 Apkpure.md DELETED
@@ -1,44 +0,0 @@
-
- <h1>Captain Tsubasa: Dream Team - The Ultimate Soccer Simulation Game</h1>
- <p>If you are a fan of soccer and manga, you may have heard of Captain Tsubasa, the popular comic series that influenced many soccer stars and players around the world. But did you know there is a game based on this comic that lets you build your own dream team and play heated matches against players from different countries? In this article we introduce Captain Tsubasa: Dream Team, the competitive soccer simulation game loved in more than 150 countries. We will tell you what the game is about, how to play it, and why you should give it a try.</p>
- <h2>What is Captain Tsubasa: Dream Team?</h2>
- <h3>The game based on the popular soccer comic</h3>
- <p>Captain Tsubasa: Dream Team is a game developed by KLab, a Japanese company that specializes in mobile games. It is based on the manga series Captain Tsubasa, created by Yoichi Takahashi in 1981, which has been serialized in several magazines and adapted into anime, films, and video games. The comic follows the adventures of Tsubasa Ozora, a young soccer prodigy who dreams of becoming a world-class player and winning the World Cup for Japan. Along the way he meets many friends and rivals who share his passion for the sport and push him to improve his skills.</p>
- <h2>get over it free download for pc windows 7 apkpure</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://bltlly.com/2v6Jgg">https://bltlly.com/2v6Jgg</a></b></p><br /><br />
- <h3>The game's features and modes</h3>
- <p>Captain Tsubasa: Dream Team is a game that combines elements of soccer simulation, role-playing, and card collecting. You can choose from hundreds of characters from the original comic, each with their own unique skills and abilities, and form your own dream team. You can also customize your team's uniforms, formations, and skills to suit your preferences and strategies.</p>
- <h2>How do you play Captain Tsubasa: Dream Team?</h2>
- <h3>How to build your own dream team</h3>
- <p>To start playing Captain Tsubasa: Dream Team, you need to build your own team. You can do this by using Transfer Tickets or Dreamballs, the in-game currencies, to obtain players from various banners. You can also get players from events, missions, raids, and scenarios. You can have up to 32 players on your team, but only 11 can be on the pitch at a time.</p>
- <p>You can assign different positions and roles to your players according to their attributes and skills. There are five attributes in the game: Agility (blue), Skill (green), Toughness (red), Solidarity (yellow), and Insight Master (purple). Each attribute has its own strengths and weaknesses against the other attributes. There are also four roles in the game: Forward (FW), Midfielder (MF), Defender (DF), and Goalkeeper (GK). Each role has its own responsibilities and functions on the pitch.</p>
- <h3>How to improve your players and skills</h3>
- <p>To make your team stronger, you need to improve your players and skills. You can do this by using the various items and materials you obtain from playing the game. You can raise your players' level, rarity, potential, and limit break by using trainers, drills, notebooks, and limit-break items. You can also raise the level and evolution of your skills by using skill field players, black balls, and skill removal cards. You can likewise transfer skills from one player to another using skill transfer tickets.</p>
- <h3>How to compete with players from around the world</h3>
- <p>To test your skills and strategies, you can play online matches against players from all over the world. You can choose from different modes, such as Rank Match, Group Match, Friendly Match, and Quick Match. Each mode has its own rules and rewards. You can also join or create a club with other players and chat, cooperate, and compete with them.</p>
- <p>The outcome of a match depends on several factors, such as your players' attributes, skills, stamina, team power, team skills, bonds, hidden abilities, and passive abilities. You also have to consider matchup advantage, critical rate, distance, angle, and the timing of your actions. You need to use your wits and creativity to beat your opponents and win the match.</p>
- <h2>Why should you play Captain Tsubasa: Dream Team?</h2>
- <h3>The game is fun and engaging for soccer fans</h3>
- <p>If you love soccer, you will love Captain Tsubasa: Dream Team. The game is fun and engaging for soccer fans of all ages and levels. You can enjoy the thrill and excitement of soccer matches with realistic graphics and sound effects. You can also experience the drama and emotion of the original comic through stunning animations and voice-overs. You can even learn more about soccer tactics and techniques from the game's tutorials and tips.</p>
- <p></p>
- <h3>The game is faithful to the original comic and characters</h3>
- <p>If you are a fan of Captain Tsubasa, you will appreciate how faithful the game is to the original comic and its characters. The game features hundreds of characters from the comic, each with their own personality, appearance, voice, skills, and backstory. You can collect your favorite characters and relive their iconic moments in the game. You can also discover new stories and scenarios that are exclusive to the game.</p>
- <h3>The game has a vibrant and active community</h3>
-
- <h2>Conclusion</h2>
- <p>Captain Tsubasa: Dream Team is a game that combines soccer simulation, role-playing, and card collecting. It is based on the popular comic series Captain Tsubasa, which influenced many soccer stars and players around the world. It lets you build your own dream team and play heated matches against players from different countries. It is fun and engaging for soccer fans of all ages and levels. It is faithful to the original comic and characters. And it has a vibrant, active community you can join or create.</p>
- <p>If you are looking for a game that challenges your skills and strategies while entertaining you with its story and characters, you should definitely try Captain Tsubasa: Dream Team. You can download the game for free from the App Store or Google Play and start your soccer adventure today. You can also visit the game's official website for more information and support.</p>
- <h2>Frequently asked questions</h2>
- <h4>What are the system requirements for Captain Tsubasa: Dream Team?</h4>
- <p>The game requires iOS 10.0 or later, or Android 4.4 or later. It also requires a stable internet connection and at least 3 GB of free storage space.</p>
- <h4>How can I get more Transfer Tickets and Dreamballs?</h4>
- <p>You can get more Transfer Tickets and Dreamballs by completing missions, logging in daily, taking part in events, watching ads, or buying them with real money.</p>
- <h4>How can I get more players and skills?</h4>
- <p>You can get more players and skills by using Transfer Tickets or Dreamballs to obtain them from banners, or by getting them from events, missions, raids, and scenarios. You can also exchange medals or coins for players and skills in the shop.</p>
- <h4>How can I join or create a club?</h4>
-
- <h4>How can I contact the game's support team?</h4>
- <p>You can contact the game's support team by tapping the menu button on the home screen and then tapping the support button. You can also email [email protected] or visit the game's official website for more help.</p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/credentials.py DELETED
@@ -1,2262 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
- import datetime
15
- import getpass
16
- import json
17
- import logging
18
- import os
19
- import subprocess
20
- import threading
21
- import time
22
- from collections import namedtuple
23
- from copy import deepcopy
24
- from hashlib import sha1
25
-
26
- from dateutil.parser import parse
27
- from dateutil.tz import tzlocal, tzutc
28
-
29
- import botocore.compat
30
- import botocore.configloader
31
- from botocore import UNSIGNED
32
- from botocore.compat import compat_shell_split, total_seconds
33
- from botocore.config import Config
34
- from botocore.exceptions import (
35
- ConfigNotFound,
36
- CredentialRetrievalError,
37
- InfiniteLoopConfigError,
38
- InvalidConfigError,
39
- MetadataRetrievalError,
40
- PartialCredentialsError,
41
- RefreshWithMFAUnsupportedError,
42
- UnauthorizedSSOTokenError,
43
- UnknownCredentialError,
44
- )
45
- from botocore.tokens import SSOTokenProvider
46
- from botocore.utils import (
47
- ContainerMetadataFetcher,
48
- FileWebIdentityTokenLoader,
49
- InstanceMetadataFetcher,
50
- JSONFileCache,
51
- SSOTokenLoader,
52
- parse_key_val_file,
53
- resolve_imds_endpoint_mode,
54
- )
55
-
56
- logger = logging.getLogger(__name__)
57
- ReadOnlyCredentials = namedtuple(
58
- 'ReadOnlyCredentials', ['access_key', 'secret_key', 'token']
59
- )
60
-
61
- _DEFAULT_MANDATORY_REFRESH_TIMEOUT = 10 * 60 # 10 min
62
- _DEFAULT_ADVISORY_REFRESH_TIMEOUT = 15 * 60 # 15 min
63
-
64
-
65
- def create_credential_resolver(session, cache=None, region_name=None):
66
- """Create a default credential resolver.
67
-
68
- This creates a pre-configured credential resolver
69
- that includes the default lookup chain for
70
- credentials.
71
-
72
- """
73
- profile_name = session.get_config_variable('profile') or 'default'
74
- metadata_timeout = session.get_config_variable('metadata_service_timeout')
75
- num_attempts = session.get_config_variable('metadata_service_num_attempts')
76
- disable_env_vars = session.instance_variables().get('profile') is not None
77
-
78
- imds_config = {
79
- 'ec2_metadata_service_endpoint': session.get_config_variable(
80
- 'ec2_metadata_service_endpoint'
81
- ),
82
- 'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode(
83
- session
84
- ),
85
- 'ec2_credential_refresh_window': _DEFAULT_ADVISORY_REFRESH_TIMEOUT,
86
- }
87
-
88
- if cache is None:
89
- cache = {}
90
-
91
- env_provider = EnvProvider()
92
- container_provider = ContainerProvider()
93
- instance_metadata_provider = InstanceMetadataProvider(
94
- iam_role_fetcher=InstanceMetadataFetcher(
95
- timeout=metadata_timeout,
96
- num_attempts=num_attempts,
97
- user_agent=session.user_agent(),
98
- config=imds_config,
99
- )
100
- )
101
-
102
- profile_provider_builder = ProfileProviderBuilder(
103
- session, cache=cache, region_name=region_name
104
- )
105
- assume_role_provider = AssumeRoleProvider(
106
- load_config=lambda: session.full_config,
107
- client_creator=_get_client_creator(session, region_name),
108
- cache=cache,
109
- profile_name=profile_name,
110
- credential_sourcer=CanonicalNameCredentialSourcer(
111
- [env_provider, container_provider, instance_metadata_provider]
112
- ),
113
- profile_provider_builder=profile_provider_builder,
114
- )
115
-
116
- pre_profile = [
117
- env_provider,
118
- assume_role_provider,
119
- ]
120
- profile_providers = profile_provider_builder.providers(
121
- profile_name=profile_name,
122
- disable_env_vars=disable_env_vars,
123
- )
124
- post_profile = [
125
- OriginalEC2Provider(),
126
- BotoProvider(),
127
- container_provider,
128
- instance_metadata_provider,
129
- ]
130
- providers = pre_profile + profile_providers + post_profile
131
-
132
- if disable_env_vars:
133
- # An explicitly provided profile will negate an EnvProvider.
134
- # We will defer to providers that understand the "profile"
135
- # concept to retrieve credentials.
136
- # The one edge case if is all three values are provided via
137
- # env vars:
138
- # export AWS_ACCESS_KEY_ID=foo
139
- # export AWS_SECRET_ACCESS_KEY=bar
140
- # export AWS_PROFILE=baz
141
- # Then, just like our client() calls, the explicit credentials
142
- # will take precedence.
143
- #
144
- # This precedence is enforced by leaving the EnvProvider in the chain.
145
- # This means that the only way a "profile" would win is if the
146
- # EnvProvider does not return credentials, which is what we want
147
- # in this scenario.
148
- providers.remove(env_provider)
149
- logger.debug(
150
- 'Skipping environment variable credential check'
151
- ' because profile name was explicitly set.'
152
- )
153
-
154
- resolver = CredentialResolver(providers=providers)
155
- return resolver
156
-
157
-
158
- class ProfileProviderBuilder:
159
- """This class handles the creation of profile based providers.
160
-
161
- NOTE: This class is only intended for internal use.
162
-
163
- This class handles the creation and ordering of the various credential
164
- providers that primarly source their configuration from the shared config.
165
- This is needed to enable sharing between the default credential chain and
166
- the source profile chain created by the assume role provider.
167
- """
168
-
169
- def __init__(
170
- self, session, cache=None, region_name=None, sso_token_cache=None
171
- ):
172
- self._session = session
173
- self._cache = cache
174
- self._region_name = region_name
175
- self._sso_token_cache = sso_token_cache
176
-
177
- def providers(self, profile_name, disable_env_vars=False):
178
- return [
179
- self._create_web_identity_provider(
180
- profile_name,
181
- disable_env_vars,
182
- ),
183
- self._create_sso_provider(profile_name),
184
- self._create_shared_credential_provider(profile_name),
185
- self._create_process_provider(profile_name),
186
- self._create_config_provider(profile_name),
187
- ]
188
-
189
- def _create_process_provider(self, profile_name):
190
- return ProcessProvider(
191
- profile_name=profile_name,
192
- load_config=lambda: self._session.full_config,
193
- )
194
-
195
- def _create_shared_credential_provider(self, profile_name):
196
- credential_file = self._session.get_config_variable('credentials_file')
197
- return SharedCredentialProvider(
198
- profile_name=profile_name,
199
- creds_filename=credential_file,
200
- )
201
-
202
- def _create_config_provider(self, profile_name):
203
- config_file = self._session.get_config_variable('config_file')
204
- return ConfigProvider(
205
- profile_name=profile_name,
206
- config_filename=config_file,
207
- )
208
-
209
- def _create_web_identity_provider(self, profile_name, disable_env_vars):
210
- return AssumeRoleWithWebIdentityProvider(
211
- load_config=lambda: self._session.full_config,
212
- client_creator=_get_client_creator(
213
- self._session, self._region_name
214
- ),
215
- cache=self._cache,
216
- profile_name=profile_name,
217
- disable_env_vars=disable_env_vars,
218
- )
219
-
220
- def _create_sso_provider(self, profile_name):
221
- return SSOProvider(
222
- load_config=lambda: self._session.full_config,
223
- client_creator=self._session.create_client,
224
- profile_name=profile_name,
225
- cache=self._cache,
226
- token_cache=self._sso_token_cache,
227
- token_provider=SSOTokenProvider(
228
- self._session,
229
- cache=self._sso_token_cache,
230
- profile_name=profile_name,
231
- ),
232
- )
233
-
234
-
235
- def get_credentials(session):
236
- resolver = create_credential_resolver(session)
237
- return resolver.load_credentials()
238
-
239
-
240
- def _local_now():
241
- return datetime.datetime.now(tzlocal())
242
-
243
-
244
- def _parse_if_needed(value):
245
- if isinstance(value, datetime.datetime):
246
- return value
247
- return parse(value)
248
-
249
-
250
- def _serialize_if_needed(value, iso=False):
251
- if isinstance(value, datetime.datetime):
252
- if iso:
253
- return value.isoformat()
254
- return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
255
- return value
256
-
257
-
258
- def _get_client_creator(session, region_name):
259
- def client_creator(service_name, **kwargs):
260
- create_client_kwargs = {'region_name': region_name}
261
- create_client_kwargs.update(**kwargs)
262
- return session.create_client(service_name, **create_client_kwargs)
263
-
264
- return client_creator
265
-
266
-
267
- def create_assume_role_refresher(client, params):
268
- def refresh():
269
- response = client.assume_role(**params)
270
- credentials = response['Credentials']
271
- # We need to normalize the credential names to
272
- # the values expected by the refresh creds.
273
- return {
274
- 'access_key': credentials['AccessKeyId'],
275
- 'secret_key': credentials['SecretAccessKey'],
276
- 'token': credentials['SessionToken'],
277
- 'expiry_time': _serialize_if_needed(credentials['Expiration']),
278
- }
279
-
280
- return refresh
281
-
282
-
283
- def create_mfa_serial_refresher(actual_refresh):
284
- class _Refresher:
285
- def __init__(self, refresh):
286
- self._refresh = refresh
287
- self._has_been_called = False
288
-
289
- def __call__(self):
290
- if self._has_been_called:
291
- # We can explore an option in the future to support
292
- # reprompting for MFA, but for now we just error out
293
- # when the temp creds expire.
294
- raise RefreshWithMFAUnsupportedError()
295
- self._has_been_called = True
296
- return self._refresh()
297
-
298
- return _Refresher(actual_refresh)
299
-
300
-
301
- class Credentials:
302
- """
303
- Holds the credentials needed to authenticate requests.
304
-
305
- :param str access_key: The access key part of the credentials.
306
- :param str secret_key: The secret key part of the credentials.
307
- :param str token: The security token, valid only for session credentials.
308
- :param str method: A string which identifies where the credentials
309
- were found.
310
- """
311
-
312
- def __init__(self, access_key, secret_key, token=None, method=None):
313
- self.access_key = access_key
314
- self.secret_key = secret_key
315
- self.token = token
316
-
317
- if method is None:
318
- method = 'explicit'
319
- self.method = method
320
-
321
- self._normalize()
322
-
323
- def _normalize(self):
324
- # Keys would sometimes (accidentally) contain non-ascii characters.
325
- # It would cause a confusing UnicodeDecodeError in Python 2.
326
- # We explicitly convert them into unicode to avoid such error.
327
- #
328
- # Eventually the service will decide whether to accept the credential.
329
- # This also complies with the behavior in Python 3.
330
- self.access_key = botocore.compat.ensure_unicode(self.access_key)
331
- self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
332
-
333
- def get_frozen_credentials(self):
334
- return ReadOnlyCredentials(
335
- self.access_key, self.secret_key, self.token
336
- )
337
-
338
-
339
class RefreshableCredentials(Credentials):
    """
    Holds the credentials needed to authenticate requests. In addition, it
    knows how to refresh itself.

    :param str access_key: The access key part of the credentials.
    :param str secret_key: The secret key part of the credentials.
    :param str token: The security token, valid only for session credentials.
    :param function refresh_using: Callback function to refresh the credentials.
    :param str method: A string which identifies where the credentials
        were found.
    :param function time_fetcher: Callback function to retrieve current time.
    """

    # The time at which we'll attempt to refresh, but not
    # block if someone else is refreshing.
    _advisory_refresh_timeout = _DEFAULT_ADVISORY_REFRESH_TIMEOUT
    # The time at which all threads will block waiting for
    # refreshed credentials.
    _mandatory_refresh_timeout = _DEFAULT_MANDATORY_REFRESH_TIMEOUT

    def __init__(
        self,
        access_key,
        secret_key,
        token,
        expiry_time,
        refresh_using,
        method,
        time_fetcher=_local_now,
    ):
        self._refresh_using = refresh_using
        self._access_key = access_key
        self._secret_key = secret_key
        self._token = token
        self._expiry_time = expiry_time
        self._time_fetcher = time_fetcher
        self._refresh_lock = threading.Lock()
        self.method = method
        self._frozen_credentials = ReadOnlyCredentials(
            access_key, secret_key, token
        )
        self._normalize()

    def _normalize(self):
        self._access_key = botocore.compat.ensure_unicode(self._access_key)
        self._secret_key = botocore.compat.ensure_unicode(self._secret_key)

    @classmethod
    def create_from_metadata(cls, metadata, refresh_using, method):
        instance = cls(
            access_key=metadata['access_key'],
            secret_key=metadata['secret_key'],
            token=metadata['token'],
            expiry_time=cls._expiry_datetime(metadata['expiry_time']),
            method=method,
            refresh_using=refresh_using,
        )
        return instance

    @property
    def access_key(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._access_key

    @access_key.setter
    def access_key(self, value):
        self._access_key = value

    @property
    def secret_key(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._secret_key

    @secret_key.setter
    def secret_key(self, value):
        self._secret_key = value

    @property
    def token(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    def _seconds_remaining(self):
        delta = self._expiry_time - self._time_fetcher()
        return total_seconds(delta)

    def refresh_needed(self, refresh_in=None):
        """Check if a refresh is needed.

        A refresh is needed if the expiry time associated
        with the temporary credentials is less than the
        provided ``refresh_in``.  If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.

        For example, if your temporary credentials expire
        in 10 minutes and the provided ``refresh_in`` is
        ``15 * 60``, then this function will return ``True``.

        :type refresh_in: int
        :param refresh_in: The number of seconds before the
            credentials expire in which refresh attempts should
            be made.

        :return: True if refresh needed, False otherwise.

        """
        if self._expiry_time is None:
            # No expiration, so assume we don't need to refresh.
            return False

        if refresh_in is None:
            refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # within the refresh_in window.
        if self._seconds_remaining() >= refresh_in:
            # There's enough time left. Don't refresh.
            return False
        logger.debug("Credentials need to be refreshed.")
        return True

    def _is_expired(self):
        # Checks if the current credentials are expired.
        return self.refresh_needed(refresh_in=0)

    def _refresh(self):
        # In the common case where we don't need a refresh, we
        # can immediately exit and not require acquiring the
        # refresh lock.
        if not self.refresh_needed(self._advisory_refresh_timeout):
            return

        # acquire() doesn't accept kwargs; passing False means
        # we should not block if we can't acquire the lock.
        # If we aren't able to acquire the lock, we'll trigger
        # the else clause.
        if self._refresh_lock.acquire(False):
            try:
                if not self.refresh_needed(self._advisory_refresh_timeout):
                    return
                is_mandatory_refresh = self.refresh_needed(
                    self._mandatory_refresh_timeout
                )
                self._protected_refresh(is_mandatory=is_mandatory_refresh)
                return
            finally:
                self._refresh_lock.release()
        elif self.refresh_needed(self._mandatory_refresh_timeout):
            # If we're within the mandatory refresh window,
            # we must block until we get refreshed credentials.
            with self._refresh_lock:
                if not self.refresh_needed(self._mandatory_refresh_timeout):
                    return
                self._protected_refresh(is_mandatory=True)

    def _protected_refresh(self, is_mandatory):
        # precondition: this method should only be called if you've acquired
        # the self._refresh_lock.
        try:
            metadata = self._refresh_using()
        except Exception:
            period_name = 'mandatory' if is_mandatory else 'advisory'
            logger.warning(
                "Refreshing temporary credentials failed "
                "during %s refresh period.",
                period_name,
                exc_info=True,
            )
            if is_mandatory:
                # If this is a mandatory refresh, then
                # all errors that occur when we attempt to refresh
                # credentials are propagated back to the user.
                raise
            # Otherwise we'll just return.
            # The end result will be that we'll use the current
            # set of temporary credentials we have.
            return
        self._set_from_data(metadata)
        self._frozen_credentials = ReadOnlyCredentials(
            self._access_key, self._secret_key, self._token
        )
        if self._is_expired():
            # We successfully refreshed credentials but for whatever
            # reason, our refreshing function returned credentials
            # that are still expired. In this scenario, the only
            # thing we can do is let the user know and raise
            # an exception.
            msg = (
                "Credentials were refreshed, but the "
                "refreshed credentials are still expired."
            )
            logger.warning(msg)
            raise RuntimeError(msg)

    @staticmethod
    def _expiry_datetime(time_str):
        return parse(time_str)

    def _set_from_data(self, data):
        expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
        if not data:
            missing_keys = expected_keys
        else:
            missing_keys = [k for k in expected_keys if k not in data]

        if missing_keys:
            message = "Credential refresh failed, response did not contain: %s"
            raise CredentialRetrievalError(
                provider=self.method,
                error_msg=message % ', '.join(missing_keys),
            )

        self.access_key = data['access_key']
        self.secret_key = data['secret_key']
        self.token = data['token']
        self._expiry_time = parse(data['expiry_time'])
        logger.debug(
            "Retrieved credentials will expire at: %s", self._expiry_time
        )
        self._normalize()

    def get_frozen_credentials(self):
        """Return immutable credentials.

        The ``access_key``, ``secret_key``, and ``token`` properties
        on this class will always check and refresh credentials if
        needed before returning the particular credentials.

        This has an edge case where you can get inconsistent
        credentials.  Imagine this:

            # Current creds are "t1"
            tmp.access_key  --->  expired? no, so return t1.access_key
            # ---- time is now expired, creds need refreshing to "t2" ----
            tmp.secret_key  --->  expired? yes, refresh and return t2.secret_key

        This means we're using the access key from t1 with the secret key
        from t2.  To fix this issue, you can request a frozen credential
        object which is guaranteed not to change.

        The frozen credentials returned from this method should be used
        immediately and then discarded.  The typical usage pattern would
        be::

            creds = RefreshableCredentials(...)
            some_code = SomeSignerObject()
            # I'm about to sign the request.
            # The frozen credentials are only used for the
            # duration of generate_presigned_url and will be
            # immediately thrown away.
            request = some_code.sign_some_request(
                with_credentials=creds.get_frozen_credentials())
            print("Signed request:", request)

        """
        self._refresh()
        return self._frozen_credentials

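# To illustrate the two refresh windows used above (assuming the module
# defaults where the advisory timeout is larger than the mandatory one, e.g.
# 15 and 10 minutes): with more time remaining than the advisory window,
# _refresh() returns immediately; inside the advisory window one thread
# refreshes opportunistically while others keep the old credentials; inside
# the mandatory window every caller blocks on the lock until refreshed
# credentials are available.
#
#     creds.refresh_needed(15 * 60)   # True once under 15 minutes remain
#     creds.refresh_needed(10 * 60)   # True once under 10 minutes remain
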
class DeferredRefreshableCredentials(RefreshableCredentials):
    """Refreshable credentials that don't require initial credentials.

    refresh_using will be called upon first access.
    """

    def __init__(self, refresh_using, method, time_fetcher=_local_now):
        self._refresh_using = refresh_using
        self._access_key = None
        self._secret_key = None
        self._token = None
        self._expiry_time = None
        self._time_fetcher = time_fetcher
        self._refresh_lock = threading.Lock()
        self.method = method
        self._frozen_credentials = None

    def refresh_needed(self, refresh_in=None):
        if self._frozen_credentials is None:
            return True
        return super().refresh_needed(refresh_in)

class CachedCredentialFetcher:
    DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15

    def __init__(self, cache=None, expiry_window_seconds=None):
        if cache is None:
            cache = {}
        self._cache = cache
        self._cache_key = self._create_cache_key()
        if expiry_window_seconds is None:
            expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS
        self._expiry_window_seconds = expiry_window_seconds

    def _create_cache_key(self):
        raise NotImplementedError('_create_cache_key()')

    def _make_file_safe(self, filename):
        # Replace ':', the OS path separator, and '/' to make
        # the string safe to use as a filename.
        filename = filename.replace(':', '_').replace(os.path.sep, '_')
        return filename.replace('/', '_')

    def _get_credentials(self):
        raise NotImplementedError('_get_credentials()')

    def fetch_credentials(self):
        return self._get_cached_credentials()

    def _get_cached_credentials(self):
        """Get up-to-date credentials.

        This will check the cache for up-to-date credentials, calling assume
        role if none are available.
        """
        response = self._load_from_cache()
        if response is None:
            response = self._get_credentials()
            self._write_to_cache(response)
        else:
            logger.debug("Credentials for role retrieved from cache.")

        creds = response['Credentials']
        expiration = _serialize_if_needed(creds['Expiration'], iso=True)
        return {
            'access_key': creds['AccessKeyId'],
            'secret_key': creds['SecretAccessKey'],
            'token': creds['SessionToken'],
            'expiry_time': expiration,
        }

    def _load_from_cache(self):
        if self._cache_key in self._cache:
            creds = deepcopy(self._cache[self._cache_key])
            if not self._is_expired(creds):
                return creds
            else:
                logger.debug(
                    "Credentials were found in cache, but they are expired."
                )
        return None

    def _write_to_cache(self, response):
        self._cache[self._cache_key] = deepcopy(response)

    def _is_expired(self, credentials):
        """Check if credentials are expired."""
        end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
        seconds = total_seconds(end_time - _local_now())
        return seconds < self._expiry_window_seconds

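# A minimal subclass sketch (hypothetical) showing the two hooks a concrete
# fetcher must provide: a stable cache key and an STS-shaped response.
#
#     class StaticFetcher(CachedCredentialFetcher):
#         def _create_cache_key(self):
#             return self._make_file_safe('static:example')
#
#         def _get_credentials(self):
#             # Must mirror the STS response shape consumed above.
#             return {
#                 'Credentials': {
#                     'AccessKeyId': 'AKID',
#                     'SecretAccessKey': 'SECRET',
#                     'SessionToken': 'TOKEN',
#                     'Expiration': '2030-01-01T00:00:00Z',
#                 }
#             }
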
class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher):
    def __init__(
        self,
        client_creator,
        role_arn,
        extra_args=None,
        cache=None,
        expiry_window_seconds=None,
    ):
        self._client_creator = client_creator
        self._role_arn = role_arn

        if extra_args is None:
            self._assume_kwargs = {}
        else:
            self._assume_kwargs = deepcopy(extra_args)
        self._assume_kwargs['RoleArn'] = self._role_arn

        self._role_session_name = self._assume_kwargs.get('RoleSessionName')
        self._using_default_session_name = False
        if not self._role_session_name:
            self._generate_assume_role_name()

        super().__init__(cache, expiry_window_seconds)

    def _generate_assume_role_name(self):
        self._role_session_name = 'botocore-session-%s' % (int(time.time()))
        self._assume_kwargs['RoleSessionName'] = self._role_session_name
        self._using_default_session_name = True

    def _create_cache_key(self):
        """Create a predictable cache key for the current configuration.

        The cache key is intended to be compatible with file names.
        """
        args = deepcopy(self._assume_kwargs)

        # The role session name gets randomly generated, so we don't want it
        # in the hash.
        if self._using_default_session_name:
            del args['RoleSessionName']

        if 'Policy' in args:
            # To have a predictable hash, the keys of the policy must be
            # sorted, so we have to load it here to make sure it gets sorted
            # later on.
            args['Policy'] = json.loads(args['Policy'])

        args = json.dumps(args, sort_keys=True)
        argument_hash = sha1(args.encode('utf-8')).hexdigest()
        return self._make_file_safe(argument_hash)

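# The cache key above reduces to a file-safe SHA-1 of the sorted assume-role
# arguments; a rough equivalent (hypothetical arguments):
#
#     args = {'RoleArn': 'arn:aws:iam::123456789012:role/Example'}
#     key = sha1(
#         json.dumps(args, sort_keys=True).encode('utf-8')
#     ).hexdigest()
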
class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher):
    def __init__(
        self,
        client_creator,
        source_credentials,
        role_arn,
        extra_args=None,
        mfa_prompter=None,
        cache=None,
        expiry_window_seconds=None,
    ):
        """
        :type client_creator: callable
        :param client_creator: A callable that creates a client taking
            arguments like ``Session.create_client``.

        :type source_credentials: Credentials
        :param source_credentials: The credentials to use to create the
            client for the call to AssumeRole.

        :type role_arn: str
        :param role_arn: The ARN of the role to be assumed.

        :type extra_args: dict
        :param extra_args: Any additional arguments to add to the assume
            role request using the format of the botocore operation.
            Possible keys include, but may not be limited to,
            DurationSeconds, Policy, SerialNumber, ExternalId and
            RoleSessionName.

        :type mfa_prompter: callable
        :param mfa_prompter: A callable that returns input provided by the
            user (i.e. raw_input, getpass.getpass, etc.).

        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``.  An example of this is
            the ``JSONFileCache`` class in aws-cli.

        :type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and refreshed.
        """
        self._source_credentials = source_credentials
        self._mfa_prompter = mfa_prompter
        if self._mfa_prompter is None:
            self._mfa_prompter = getpass.getpass

        super().__init__(
            client_creator,
            role_arn,
            extra_args=extra_args,
            cache=cache,
            expiry_window_seconds=expiry_window_seconds,
        )

    def _get_credentials(self):
        """Get credentials by calling assume role."""
        kwargs = self._assume_role_kwargs()
        client = self._create_client()
        return client.assume_role(**kwargs)

    def _assume_role_kwargs(self):
        """Get the arguments for assume role based on current configuration."""
        assume_role_kwargs = deepcopy(self._assume_kwargs)

        mfa_serial = assume_role_kwargs.get('SerialNumber')

        if mfa_serial is not None:
            prompt = 'Enter MFA code for %s: ' % mfa_serial
            token_code = self._mfa_prompter(prompt)
            assume_role_kwargs['TokenCode'] = token_code

        duration_seconds = assume_role_kwargs.get('DurationSeconds')

        if duration_seconds is not None:
            assume_role_kwargs['DurationSeconds'] = duration_seconds

        return assume_role_kwargs

    def _create_client(self):
        """Create an STS client using the source credentials."""
        frozen_credentials = self._source_credentials.get_frozen_credentials()
        return self._client_creator(
            'sts',
            aws_access_key_id=frozen_credentials.access_key,
            aws_secret_access_key=frozen_credentials.secret_key,
            aws_session_token=frozen_credentials.token,
        )

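# A usage sketch (hypothetical ARN; assumes ``client_creator`` and static
# ``source_creds`` exist) tying the fetcher to refreshable credentials:
#
#     fetcher = AssumeRoleCredentialFetcher(
#         client_creator=client_creator,
#         source_credentials=source_creds,
#         role_arn='arn:aws:iam::123456789012:role/Example',
#     )
#     creds = DeferredRefreshableCredentials(
#         refresh_using=fetcher.fetch_credentials, method='assume-role',
#     )
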
class AssumeRoleWithWebIdentityCredentialFetcher(
    BaseAssumeRoleCredentialFetcher
):
    def __init__(
        self,
        client_creator,
        web_identity_token_loader,
        role_arn,
        extra_args=None,
        cache=None,
        expiry_window_seconds=None,
    ):
        """
        :type client_creator: callable
        :param client_creator: A callable that creates a client taking
            arguments like ``Session.create_client``.

        :type web_identity_token_loader: callable
        :param web_identity_token_loader: A callable that takes no arguments
            and returns a web identity token str.

        :type role_arn: str
        :param role_arn: The ARN of the role to be assumed.

        :type extra_args: dict
        :param extra_args: Any additional arguments to add to the assume
            role request using the format of the botocore operation.
            Possible keys include, but may not be limited to,
            DurationSeconds, Policy, SerialNumber, ExternalId and
            RoleSessionName.

        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``.  An example of this is
            the ``JSONFileCache`` class in aws-cli.

        :type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and refreshed.
        """
        self._web_identity_token_loader = web_identity_token_loader

        super().__init__(
            client_creator,
            role_arn,
            extra_args=extra_args,
            cache=cache,
            expiry_window_seconds=expiry_window_seconds,
        )

    def _get_credentials(self):
        """Get credentials by calling assume role."""
        kwargs = self._assume_role_kwargs()
        # Assume role with web identity does not require credentials other
        # than the token, so we explicitly configure the client not to sign
        # requests.
        config = Config(signature_version=UNSIGNED)
        client = self._client_creator('sts', config=config)
        return client.assume_role_with_web_identity(**kwargs)

    def _assume_role_kwargs(self):
        """Get the arguments for assume role based on current configuration."""
        assume_role_kwargs = deepcopy(self._assume_kwargs)
        identity_token = self._web_identity_token_loader()
        assume_role_kwargs['WebIdentityToken'] = identity_token

        return assume_role_kwargs

class CredentialProvider:
    # A short name to identify the provider within botocore.
    METHOD = None

    # A name to identify the provider for use in cross-sdk features like
    # assume role's `credential_source` configuration option.  These names
    # are to be treated in a case-insensitive way.  NOTE: any providers not
    # implemented in botocore MUST prefix their canonical names with
    # 'custom' or we DO NOT guarantee that it will work with any features
    # that this provides.
    CANONICAL_NAME = None

    def __init__(self, session=None):
        self.session = session

    def load(self):
        """
        Loads the credentials from their source & sets them on the object.

        Subclasses should implement this method (by reading from disk, the
        environment, the network or wherever), returning ``True`` if they were
        found & loaded.

        If not found, this method should return ``False``, indicating that the
        ``CredentialResolver`` should fall back to the next available method.

        The default implementation does nothing, assuming the user has set the
        ``access_key/secret_key/token`` themselves.

        :returns: Whether credentials were found & set
        :rtype: Credentials
        """
        return True

    def _extract_creds_from_mapping(self, mapping, *key_names):
        found = []
        for key_name in key_names:
            try:
                found.append(mapping[key_name])
            except KeyError:
                raise PartialCredentialsError(
                    provider=self.METHOD, cred_var=key_name
                )
        return found

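# A minimal custom provider sketch following the contract above; note the
# 'custom' prefix required of canonical names outside botocore (the secret
# source below is hypothetical):
#
#     class MyVaultProvider(CredentialProvider):
#         METHOD = 'my-vault'
#         CANONICAL_NAME = 'custom-my-vault'
#
#         def load(self):
#             secret = {'key': 'AKID', 'secret': 'SECRET'}
#             return Credentials(
#                 secret['key'], secret['secret'], method=self.METHOD
#             )
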
class ProcessProvider(CredentialProvider):

    METHOD = 'custom-process'

    def __init__(self, profile_name, load_config, popen=subprocess.Popen):
        self._profile_name = profile_name
        self._load_config = load_config
        self._loaded_config = None
        self._popen = popen

    def load(self):
        credential_process = self._credential_process
        if credential_process is None:
            return

        creds_dict = self._retrieve_credentials_using(credential_process)
        if creds_dict.get('expiry_time') is not None:
            return RefreshableCredentials.create_from_metadata(
                creds_dict,
                lambda: self._retrieve_credentials_using(credential_process),
                self.METHOD,
            )

        return Credentials(
            access_key=creds_dict['access_key'],
            secret_key=creds_dict['secret_key'],
            token=creds_dict.get('token'),
            method=self.METHOD,
        )

    def _retrieve_credentials_using(self, credential_process):
        # We're not using shell=True, so we need to pass the
        # command and all arguments as a list.
        process_list = compat_shell_split(credential_process)
        p = self._popen(
            process_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise CredentialRetrievalError(
                provider=self.METHOD, error_msg=stderr.decode('utf-8')
            )
        parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
        version = parsed.get('Version', '<Version key not provided>')
        if version != 1:
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg=(
                    f"Unsupported version '{version}' for credential process "
                    f"provider, supported versions: 1"
                ),
            )
        try:
            return {
                'access_key': parsed['AccessKeyId'],
                'secret_key': parsed['SecretAccessKey'],
                'token': parsed.get('SessionToken'),
                'expiry_time': parsed.get('Expiration'),
            }
        except KeyError as e:
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg=f"Missing required key in response: {e}",
            )

    @property
    def _credential_process(self):
        if self._loaded_config is None:
            self._loaded_config = self._load_config()
        profile_config = self._loaded_config.get('profiles', {}).get(
            self._profile_name, {}
        )
        return profile_config.get('credential_process')

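# The configured process must print a JSON document of this shape on stdout
# (Version is required and must be 1; values below are hypothetical):
#
#     {
#         "Version": 1,
#         "AccessKeyId": "AKID",
#         "SecretAccessKey": "SECRET",
#         "SessionToken": "TOKEN",
#         "Expiration": "2030-01-01T00:00:00Z"
#     }
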
class InstanceMetadataProvider(CredentialProvider):
    METHOD = 'iam-role'
    CANONICAL_NAME = 'Ec2InstanceMetadata'

    def __init__(self, iam_role_fetcher):
        self._role_fetcher = iam_role_fetcher

    def load(self):
        fetcher = self._role_fetcher
        # We do the first request, to see if we get useful data back.
        # If not, we'll pass & move on to whatever's next in the credential
        # chain.
        metadata = fetcher.retrieve_iam_role_credentials()
        if not metadata:
            return None
        logger.info(
            'Found credentials from IAM Role: %s', metadata['role_name']
        )
        # We manually set the data here, since we already made the request &
        # have it.  When the expiry is hit, the credentials will auto-refresh
        # themselves.
        creds = RefreshableCredentials.create_from_metadata(
            metadata,
            method=self.METHOD,
            refresh_using=fetcher.retrieve_iam_role_credentials,
        )
        return creds

class EnvProvider(CredentialProvider):
    METHOD = 'env'
    CANONICAL_NAME = 'Environment'
    ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
    SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env vars.
    # AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
    TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
    EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'

    def __init__(self, environ=None, mapping=None):
        """
        :param environ: The environment variables (defaults to
            ``os.environ`` if no value is provided).
        :param mapping: An optional mapping of variable names to
            environment variable names.  Use this if you want to
            change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can have up to 4 keys: ``access_key``, ``secret_key``,
            ``token``, ``expiry_time``.
        """
        if environ is None:
            environ = os.environ
        self.environ = environ
        self._mapping = self._build_mapping(mapping)

    def _build_mapping(self, mapping):
        # Mapping of variable name to env var name.
        var_mapping = {}
        if mapping is None:
            # Use the class var default.
            var_mapping['access_key'] = self.ACCESS_KEY
            var_mapping['secret_key'] = self.SECRET_KEY
            var_mapping['token'] = self.TOKENS
            var_mapping['expiry_time'] = self.EXPIRY_TIME
        else:
            var_mapping['access_key'] = mapping.get(
                'access_key', self.ACCESS_KEY
            )
            var_mapping['secret_key'] = mapping.get(
                'secret_key', self.SECRET_KEY
            )
            var_mapping['token'] = mapping.get('token', self.TOKENS)
            if not isinstance(var_mapping['token'], list):
                var_mapping['token'] = [var_mapping['token']]
            var_mapping['expiry_time'] = mapping.get(
                'expiry_time', self.EXPIRY_TIME
            )
        return var_mapping

    def load(self):
        """
        Search for credentials in explicit environment variables.
        """

        access_key = self.environ.get(self._mapping['access_key'], '')

        if access_key:
            logger.info('Found credentials in environment variables.')
            fetcher = self._create_credentials_fetcher()
            credentials = fetcher(require_expiry=False)

            expiry_time = credentials['expiry_time']
            if expiry_time is not None:
                expiry_time = parse(expiry_time)
                return RefreshableCredentials(
                    credentials['access_key'],
                    credentials['secret_key'],
                    credentials['token'],
                    expiry_time,
                    refresh_using=fetcher,
                    method=self.METHOD,
                )

            return Credentials(
                credentials['access_key'],
                credentials['secret_key'],
                credentials['token'],
                method=self.METHOD,
            )
        else:
            return None

    def _create_credentials_fetcher(self):
        mapping = self._mapping
        method = self.METHOD
        environ = self.environ

        def fetch_credentials(require_expiry=True):
            credentials = {}

            access_key = environ.get(mapping['access_key'], '')
            if not access_key:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['access_key']
                )
            credentials['access_key'] = access_key

            secret_key = environ.get(mapping['secret_key'], '')
            if not secret_key:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['secret_key']
                )
            credentials['secret_key'] = secret_key

            credentials['token'] = None
            for token_env_var in mapping['token']:
                token = environ.get(token_env_var, '')
                if token:
                    credentials['token'] = token
                    break

            credentials['expiry_time'] = None
            expiry_time = environ.get(mapping['expiry_time'], '')
            if expiry_time:
                credentials['expiry_time'] = expiry_time
            if require_expiry and not expiry_time:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['expiry_time']
                )

            return credentials

        return fetch_credentials

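# The environment shape consumed above (hypothetical values); supplying
# AWS_CREDENTIAL_EXPIRATION upgrades the result to RefreshableCredentials:
#
#     export AWS_ACCESS_KEY_ID=AKID
#     export AWS_SECRET_ACCESS_KEY=SECRET
#     export AWS_SESSION_TOKEN=TOKEN
#     export AWS_CREDENTIAL_EXPIRATION=2030-01-01T00:00:00Z
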
class OriginalEC2Provider(CredentialProvider):
    METHOD = 'ec2-credentials-file'
    CANONICAL_NAME = 'Ec2Config'

    CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
    ACCESS_KEY = 'AWSAccessKeyId'
    SECRET_KEY = 'AWSSecretKey'

    def __init__(self, environ=None, parser=None):
        if environ is None:
            environ = os.environ
        if parser is None:
            parser = parse_key_val_file
        self._environ = environ
        self._parser = parser

    def load(self):
        """
        Search for a credential file used by original EC2 CLI tools.
        """
        if 'AWS_CREDENTIAL_FILE' in self._environ:
            full_path = os.path.expanduser(
                self._environ['AWS_CREDENTIAL_FILE']
            )
            creds = self._parser(full_path)
            if self.ACCESS_KEY in creds:
                logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
                access_key = creds[self.ACCESS_KEY]
                secret_key = creds[self.SECRET_KEY]
                # EC2 creds file doesn't support session tokens.
                return Credentials(access_key, secret_key, method=self.METHOD)
        else:
            return None

class SharedCredentialProvider(CredentialProvider):
    METHOD = 'shared-credentials-file'
    CANONICAL_NAME = 'SharedCredentials'

    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'
    # Same deal as the EnvProvider above.  Botocore originally supported
    # aws_security_token, but the SDKs are standardizing on aws_session_token
    # so we support both.
    TOKENS = ['aws_security_token', 'aws_session_token']

    def __init__(self, creds_filename, profile_name=None, ini_parser=None):
        self._creds_filename = creds_filename
        if profile_name is None:
            profile_name = 'default'
        self._profile_name = profile_name
        if ini_parser is None:
            ini_parser = botocore.configloader.raw_config_parse
        self._ini_parser = ini_parser

    def load(self):
        try:
            available_creds = self._ini_parser(self._creds_filename)
        except ConfigNotFound:
            return None
        if self._profile_name in available_creds:
            config = available_creds[self._profile_name]
            if self.ACCESS_KEY in config:
                logger.info(
                    "Found credentials in shared credentials file: %s",
                    self._creds_filename,
                )
                access_key, secret_key = self._extract_creds_from_mapping(
                    config, self.ACCESS_KEY, self.SECRET_KEY
                )
                token = self._get_session_token(config)
                return Credentials(
                    access_key, secret_key, token, method=self.METHOD
                )

    def _get_session_token(self, config):
        for token_envvar in self.TOKENS:
            if token_envvar in config:
                return config[token_envvar]

class ConfigProvider(CredentialProvider):
    """INI based config provider with profile sections."""

    METHOD = 'config-file'
    CANONICAL_NAME = 'SharedConfig'

    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'
    # Same deal as the EnvProvider above.  Botocore originally supported
    # aws_security_token, but the SDKs are standardizing on aws_session_token
    # so we support both.
    TOKENS = ['aws_security_token', 'aws_session_token']

    def __init__(self, config_filename, profile_name, config_parser=None):
        """
        :param config_filename: The session configuration scoped to the current
            profile.  This is available via ``session.config``.
        :param profile_name: The name of the current profile.
        :param config_parser: A config parser callable.
        """
        self._config_filename = config_filename
        self._profile_name = profile_name
        if config_parser is None:
            config_parser = botocore.configloader.load_config
        self._config_parser = config_parser

    def load(self):
        """
        If there are credentials in the configuration associated with
        the session, use those.
        """
        try:
            full_config = self._config_parser(self._config_filename)
        except ConfigNotFound:
            return None
        if self._profile_name in full_config['profiles']:
            profile_config = full_config['profiles'][self._profile_name]
            if self.ACCESS_KEY in profile_config:
                logger.info(
                    "Credentials found in config file: %s",
                    self._config_filename,
                )
                access_key, secret_key = self._extract_creds_from_mapping(
                    profile_config, self.ACCESS_KEY, self.SECRET_KEY
                )
                token = self._get_session_token(profile_config)
                return Credentials(
                    access_key, secret_key, token, method=self.METHOD
                )
        else:
            return None

    def _get_session_token(self, profile_config):
        for token_name in self.TOKENS:
            if token_name in profile_config:
                return profile_config[token_name]

class BotoProvider(CredentialProvider):
    METHOD = 'boto-config'
    CANONICAL_NAME = 'Boto2Config'

    BOTO_CONFIG_ENV = 'BOTO_CONFIG'
    DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'

    def __init__(self, environ=None, ini_parser=None):
        if environ is None:
            environ = os.environ
        if ini_parser is None:
            ini_parser = botocore.configloader.raw_config_parse
        self._environ = environ
        self._ini_parser = ini_parser

    def load(self):
        """
        Look for credentials in boto config file.
        """
        if self.BOTO_CONFIG_ENV in self._environ:
            potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
        else:
            potential_locations = self.DEFAULT_CONFIG_FILENAMES
        for filename in potential_locations:
            try:
                config = self._ini_parser(filename)
            except ConfigNotFound:
                # Move on to the next potential config file name.
                continue
            if 'Credentials' in config:
                credentials = config['Credentials']
                if self.ACCESS_KEY in credentials:
                    logger.info(
                        "Found credentials in boto config file: %s", filename
                    )
                    access_key, secret_key = self._extract_creds_from_mapping(
                        credentials, self.ACCESS_KEY, self.SECRET_KEY
                    )
                    return Credentials(
                        access_key, secret_key, method=self.METHOD
                    )

class AssumeRoleProvider(CredentialProvider):
    METHOD = 'assume-role'
    # The AssumeRole provider is logically part of the SharedConfig and
    # SharedCredentials providers.  Since the purpose of the canonical name
    # is to provide cross-sdk compatibility, calling code will need to be
    # aware that either of those providers should be tied to the AssumeRole
    # provider as much as possible.
    CANONICAL_NAME = None
    ROLE_CONFIG_VAR = 'role_arn'
    WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
    # Credentials are considered expired (and will be refreshed) once the total
    # remaining time left until the credentials expires is less than the
    # EXPIRY_WINDOW.
    EXPIRY_WINDOW_SECONDS = 60 * 15

    def __init__(
        self,
        load_config,
        client_creator,
        cache,
        profile_name,
        prompter=getpass.getpass,
        credential_sourcer=None,
        profile_provider_builder=None,
    ):
        """
        :type load_config: callable
        :param load_config: A function that accepts no arguments, and
            when called, will return the full configuration dictionary
            for the session (``session.full_config``).

        :type client_creator: callable
        :param client_creator: A factory function that will create
            a client when called.  Has the same interface as
            ``botocore.session.Session.create_client``.

        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``.  An example
            of this is the ``JSONFileCache`` class in the CLI.

        :type profile_name: str
        :param profile_name: The name of the profile.

        :type prompter: callable
        :param prompter: A callable that returns input provided
            by the user (i.e. raw_input, getpass.getpass, etc.).

        :type credential_sourcer: CanonicalNameCredentialSourcer
        :param credential_sourcer: A credential provider that takes a
            configuration, which is used to provide the source credentials
            for the STS call.
        """
        #: The cache used to first check for assumed credentials.
        #: This is checked before making the AssumeRole API
        #: calls and can be useful if you have short lived
        #: scripts and you'd like to avoid calling AssumeRole
        #: until the credentials are expired.
        self.cache = cache
        self._load_config = load_config
        # client_creator is a callable that creates a client.
        # It's basically session.create_client.
        self._client_creator = client_creator
        self._profile_name = profile_name
        self._prompter = prompter
        # The _loaded_config attribute will be populated from the
        # load_config() function once the configuration is actually
        # loaded.  The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer configuration loading until we actually try
        # to load credentials (as opposed to when the object is
        # instantiated).
        self._loaded_config = {}
        self._credential_sourcer = credential_sourcer
        self._profile_provider_builder = profile_provider_builder
        self._visited_profiles = [self._profile_name]

    def load(self):
        self._loaded_config = self._load_config()
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles.get(self._profile_name, {})
        if self._has_assume_role_config_vars(profile):
            return self._load_creds_via_assume_role(self._profile_name)

    def _has_assume_role_config_vars(self, profile):
        return (
            self.ROLE_CONFIG_VAR in profile
            and
            # We need to ensure this provider doesn't look at a profile when
            # the profile has configuration for web identity.  Simply relying
            # on the order in the credential chain is insufficient as it
            # doesn't prevent the case when we're doing an assume role chain.
            self.WEB_IDENTITY_TOKE_FILE_VAR not in profile
        )

    def _load_creds_via_assume_role(self, profile_name):
        role_config = self._get_role_config(profile_name)
        source_credentials = self._resolve_source_credentials(
            role_config, profile_name
        )

        extra_args = {}
        role_session_name = role_config.get('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name

        external_id = role_config.get('external_id')
        if external_id is not None:
            extra_args['ExternalId'] = external_id

        mfa_serial = role_config.get('mfa_serial')
        if mfa_serial is not None:
            extra_args['SerialNumber'] = mfa_serial

        duration_seconds = role_config.get('duration_seconds')
        if duration_seconds is not None:
            extra_args['DurationSeconds'] = duration_seconds

        fetcher = AssumeRoleCredentialFetcher(
            client_creator=self._client_creator,
            source_credentials=source_credentials,
            role_arn=role_config['role_arn'],
            extra_args=extra_args,
            mfa_prompter=self._prompter,
            cache=self.cache,
        )
        refresher = fetcher.fetch_credentials
        if mfa_serial is not None:
            refresher = create_mfa_serial_refresher(refresher)

        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=refresher,
            time_fetcher=_local_now,
        )

    def _get_role_config(self, profile_name):
        """Retrieves and validates the role configuration for the profile."""
        profiles = self._loaded_config.get('profiles', {})

        profile = profiles[profile_name]
        source_profile = profile.get('source_profile')
        role_arn = profile['role_arn']
        credential_source = profile.get('credential_source')
        mfa_serial = profile.get('mfa_serial')
        external_id = profile.get('external_id')
        role_session_name = profile.get('role_session_name')
        duration_seconds = profile.get('duration_seconds')

        role_config = {
            'role_arn': role_arn,
            'external_id': external_id,
            'mfa_serial': mfa_serial,
            'role_session_name': role_session_name,
            'source_profile': source_profile,
            'credential_source': credential_source,
        }

        if duration_seconds is not None:
            try:
                role_config['duration_seconds'] = int(duration_seconds)
            except ValueError:
                pass

        # Either the credential source or the source profile must be
        # specified, but not both.
        if credential_source is not None and source_profile is not None:
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" contains both source_profile and '
                    'credential_source.' % profile_name
                )
            )
        elif credential_source is None and source_profile is None:
            raise PartialCredentialsError(
                provider=self.METHOD,
                cred_var='source_profile or credential_source',
            )
        elif credential_source is not None:
            self._validate_credential_source(profile_name, credential_source)
        else:
            self._validate_source_profile(profile_name, source_profile)

        return role_config

    def _validate_credential_source(self, parent_profile, credential_source):
        if self._credential_sourcer is None:
            raise InvalidConfigError(
                error_msg=(
                    f"The credential_source \"{credential_source}\" is specified "
                    f"in profile \"{parent_profile}\", "
                    f"but no source provider was configured."
                )
            )
        if not self._credential_sourcer.is_supported(credential_source):
            raise InvalidConfigError(
                error_msg=(
                    f"The credential source \"{credential_source}\" referenced "
                    f"in profile \"{parent_profile}\" is not valid."
                )
            )

    def _source_profile_has_credentials(self, profile):
        return any(
            [
                self._has_static_credentials(profile),
                self._has_assume_role_config_vars(profile),
            ]
        )

    def _validate_source_profile(
        self, parent_profile_name, source_profile_name
    ):
        profiles = self._loaded_config.get('profiles', {})
        if source_profile_name not in profiles:
            raise InvalidConfigError(
                error_msg=(
                    f"The source_profile \"{source_profile_name}\" referenced in "
                    f"the profile \"{parent_profile_name}\" does not exist."
                )
            )

        source_profile = profiles[source_profile_name]

        # Make sure we aren't going into an infinite loop.  If we haven't
        # visited the profile yet, we're good.
        if source_profile_name not in self._visited_profiles:
            return

        # If we have visited the profile and the profile isn't simply
        # referencing itself, that's an infinite loop.
        if source_profile_name != parent_profile_name:
            raise InfiniteLoopConfigError(
                source_profile=source_profile_name,
                visited_profiles=self._visited_profiles,
            )

        # A profile is allowed to reference itself so that it can source
        # static credentials and have configuration all in the same
        # profile.  This will only ever work for the top level assume
        # role because the static credentials will otherwise take
        # precedence.
        if not self._has_static_credentials(source_profile):
            raise InfiniteLoopConfigError(
                source_profile=source_profile_name,
                visited_profiles=self._visited_profiles,
            )

    def _has_static_credentials(self, profile):
        static_keys = ['aws_secret_access_key', 'aws_access_key_id']
        return any(static_key in profile for static_key in static_keys)

    def _resolve_source_credentials(self, role_config, profile_name):
        credential_source = role_config.get('credential_source')
        if credential_source is not None:
            return self._resolve_credentials_from_source(
                credential_source, profile_name
            )

        source_profile = role_config['source_profile']
        self._visited_profiles.append(source_profile)
        return self._resolve_credentials_from_profile(source_profile)

    def _resolve_credentials_from_profile(self, profile_name):
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles[profile_name]

        if (
            self._has_static_credentials(profile)
            and not self._profile_provider_builder
        ):
            # This is only here for backwards compatibility.  If this provider
            # isn't given a profile provider builder we still want to be able
            # to handle the basic static credential case as we would before
            # the profile provider builder parameter was added.
            return self._resolve_static_credentials_from_profile(profile)
        elif self._has_static_credentials(
            profile
        ) or not self._has_assume_role_config_vars(profile):
            profile_providers = self._profile_provider_builder.providers(
                profile_name=profile_name,
                disable_env_vars=True,
            )
            profile_chain = CredentialResolver(profile_providers)
            credentials = profile_chain.load_credentials()
            if credentials is None:
                error_message = (
                    'The source profile "%s" must have credentials.'
                )
                raise InvalidConfigError(
                    error_msg=error_message % profile_name,
                )
            return credentials

        return self._load_creds_via_assume_role(profile_name)

    def _resolve_static_credentials_from_profile(self, profile):
        try:
            return Credentials(
                access_key=profile['aws_access_key_id'],
                secret_key=profile['aws_secret_access_key'],
                token=profile.get('aws_session_token'),
            )
        except KeyError as e:
            raise PartialCredentialsError(
                provider=self.METHOD, cred_var=str(e)
            )

    def _resolve_credentials_from_source(
        self, credential_source, profile_name
    ):
        credentials = self._credential_sourcer.source_credentials(
            credential_source
        )
        if credentials is None:
            raise CredentialRetrievalError(
                provider=credential_source,
                error_msg=(
                    'No credentials found in credential_source referenced '
                    'in profile %s' % profile_name
                ),
            )
        return credentials

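# A config-file sketch of the two mutually exclusive source options handled
# above (hypothetical values); exactly one of source_profile or
# credential_source may be set:
#
#     [profile deploy]
#     role_arn = arn:aws:iam::123456789012:role/Example
#     source_profile = default
#     # credential_source = Ec2InstanceMetadata   (the alternative)
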
class AssumeRoleWithWebIdentityProvider(CredentialProvider):
    METHOD = 'assume-role-with-web-identity'
    CANONICAL_NAME = None
    _CONFIG_TO_ENV_VAR = {
        'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
        'role_session_name': 'AWS_ROLE_SESSION_NAME',
        'role_arn': 'AWS_ROLE_ARN',
    }

    def __init__(
        self,
        load_config,
        client_creator,
        profile_name,
        cache=None,
        disable_env_vars=False,
        token_loader_cls=None,
    ):
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name
        self._profile_config = None
        self._disable_env_vars = disable_env_vars
        if token_loader_cls is None:
            token_loader_cls = FileWebIdentityTokenLoader
        self._token_loader_cls = token_loader_cls

    def load(self):
        return self._assume_role_with_web_identity()

    def _get_profile_config(self, key):
        if self._profile_config is None:
            loaded_config = self._load_config()
            profiles = loaded_config.get('profiles', {})
            self._profile_config = profiles.get(self._profile_name, {})
        return self._profile_config.get(key)

    def _get_env_config(self, key):
        if self._disable_env_vars:
            return None
        env_key = self._CONFIG_TO_ENV_VAR.get(key)
        if env_key and env_key in os.environ:
            return os.environ[env_key]
        return None

    def _get_config(self, key):
        env_value = self._get_env_config(key)
        if env_value is not None:
            return env_value
        return self._get_profile_config(key)

    def _assume_role_with_web_identity(self):
        token_path = self._get_config('web_identity_token_file')
        if not token_path:
            return None
        token_loader = self._token_loader_cls(token_path)

        role_arn = self._get_config('role_arn')
        if not role_arn:
            error_msg = (
                'The provided profile or the current environment is '
                'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
            )
            raise InvalidConfigError(error_msg=error_msg)

        extra_args = {}
        role_session_name = self._get_config('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name

        fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
            client_creator=self._client_creator,
            web_identity_token_loader=token_loader,
            role_arn=role_arn,
            extra_args=extra_args,
            cache=self.cache,
        )
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=fetcher.fetch_credentials,
        )

class CanonicalNameCredentialSourcer:
    def __init__(self, providers):
        self._providers = providers

    def is_supported(self, source_name):
        """Validates a given source name.

        :type source_name: str
        :param source_name: The value of credential_source in the config
            file.  This is the canonical name of the credential provider.

        :rtype: bool
        :returns: True if the credential provider is supported,
            False otherwise.
        """
        return source_name in [p.CANONICAL_NAME for p in self._providers]

    def source_credentials(self, source_name):
        """Loads source credentials based on the provided configuration.

        :type source_name: str
        :param source_name: The value of credential_source in the config
            file.  This is the canonical name of the credential provider.

        :rtype: Credentials
        """
        source = self._get_provider(source_name)
        if isinstance(source, CredentialResolver):
            return source.load_credentials()
        return source.load()

    def _get_provider(self, canonical_name):
        """Return a credential provider by its canonical name.

        :type canonical_name: str
        :param canonical_name: The canonical name of the provider.

        :raises UnknownCredentialError: Raised if no
            credential provider by the provided name
            is found.
        """
        provider = self._get_provider_by_canonical_name(canonical_name)

        # The AssumeRole provider should really be part of the SharedConfig
        # provider rather than being its own thing, but it is not.  It is
        # effectively part of both the SharedConfig provider and the
        # SharedCredentials provider now due to the way it behaves.
        # Therefore if we want either of those providers we should return
        # the AssumeRole provider with it.
        if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
            assume_role_provider = self._get_provider_by_method('assume-role')
            if assume_role_provider is not None:
                # The SharedConfig or SharedCredentials provider may not be
                # present if it was removed for some reason, but the
                # AssumeRole provider could still be present.  In that case,
                # return the assume role provider by itself.
                if provider is None:
                    return assume_role_provider

                # If both are present, return them both as a
                # CredentialResolver so that calling code can treat them as
                # a single entity.
                return CredentialResolver([assume_role_provider, provider])

        if provider is None:
            raise UnknownCredentialError(name=canonical_name)

        return provider

    def _get_provider_by_canonical_name(self, canonical_name):
        """Return a credential provider by its canonical name.

        This function is strict, it does not attempt to address
        compatibility issues.
        """
        for provider in self._providers:
            name = provider.CANONICAL_NAME
            # Canonical names are case-insensitive.
            if name and name.lower() == canonical_name.lower():
                return provider

    def _get_provider_by_method(self, method):
        """Return a credential provider by its METHOD name."""
        for provider in self._providers:
            if provider.METHOD == method:
                return provider

- class ContainerProvider(CredentialProvider):
1884
- METHOD = 'container-role'
1885
- CANONICAL_NAME = 'EcsContainer'
1886
- ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
1887
- ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
1888
- ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
1889
-
1890
- def __init__(self, environ=None, fetcher=None):
1891
- if environ is None:
1892
- environ = os.environ
1893
- if fetcher is None:
1894
- fetcher = ContainerMetadataFetcher()
1895
- self._environ = environ
1896
- self._fetcher = fetcher
1897
-
1898
- def load(self):
1899
- # This cred provider is only triggered if the self.ENV_VAR is set,
1900
- # which only happens if you opt into this feature.
1901
- if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
1902
- return self._retrieve_or_fail()
1903
-
1904
- def _retrieve_or_fail(self):
1905
- if self._provided_relative_uri():
1906
- full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
1907
- else:
1908
- full_uri = self._environ[self.ENV_VAR_FULL]
1909
- headers = self._build_headers()
1910
- fetcher = self._create_fetcher(full_uri, headers)
1911
- creds = fetcher()
1912
- return RefreshableCredentials(
1913
- access_key=creds['access_key'],
1914
- secret_key=creds['secret_key'],
1915
- token=creds['token'],
1916
- method=self.METHOD,
1917
- expiry_time=_parse_if_needed(creds['expiry_time']),
1918
- refresh_using=fetcher,
1919
- )
1920
-
1921
- def _build_headers(self):
1922
- auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
1923
- if auth_token is not None:
1924
- return {'Authorization': auth_token}
1925
-
1926
- def _create_fetcher(self, full_uri, headers):
1927
- def fetch_creds():
1928
- try:
1929
- response = self._fetcher.retrieve_full_uri(
1930
- full_uri, headers=headers
1931
- )
1932
- except MetadataRetrievalError as e:
1933
- logger.debug(
1934
- "Error retrieving container metadata: %s", e, exc_info=True
1935
- )
1936
- raise CredentialRetrievalError(
1937
- provider=self.METHOD, error_msg=str(e)
1938
- )
1939
- return {
1940
- 'access_key': response['AccessKeyId'],
1941
- 'secret_key': response['SecretAccessKey'],
1942
- 'token': response['Token'],
1943
- 'expiry_time': response['Expiration'],
1944
- }
1945
-
1946
- return fetch_creds
1947
-
1948
- def _provided_relative_uri(self):
1949
- return self.ENV_VAR in self._environ
1950
-
1951
-
1952
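A minimal sketch of exercising the provider by hand; the relative URI below is a placeholder (on ECS the container agent injects the real value, and a Session normally builds this provider for you):

    import os
    from botocore.credentials import ContainerProvider

    env = dict(os.environ)
    env['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] = '/v2/credentials/example'  # placeholder
    creds = ContainerProvider(environ=env).load()  # triggers a metadata fetch; returns None when neither env var is set
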
class CredentialResolver:
    def __init__(self, providers):
        """
        :param providers: A list of ``CredentialProvider`` instances.
        """
        self.providers = providers

    def insert_before(self, name, credential_provider):
        """
        Inserts a new instance of ``CredentialProvider`` into the chain that
        will be tried before an existing one.

        :param name: The short name of the provider you'd like to insert the
            new credentials before (ex. ``env`` or ``config``). Existing names
            & ordering can be discovered via ``self.available_methods``.
        :type name: string

        :param credential_provider: An instance of the new ``CredentialProvider``
            you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
        """
        try:
            offset = [p.METHOD for p in self.providers].index(name)
        except ValueError:
            raise UnknownCredentialError(name=name)
        self.providers.insert(offset, credential_provider)

    def insert_after(self, name, credential_provider):
        """
        Inserts a new instance of ``CredentialProvider`` into the chain that
        will be tried after an existing one.

        :param name: The short name of the provider you'd like to insert the
            new credentials after (ex. ``env`` or ``config``). Existing names
            & ordering can be discovered via ``self.available_methods``.
        :type name: string

        :param credential_provider: An instance of the new ``CredentialProvider``
            you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
        """
        offset = self._get_provider_offset(name)
        self.providers.insert(offset + 1, credential_provider)

    def remove(self, name):
        """
        Removes a given ``CredentialProvider`` instance from the chain.

        :param name: The short name of the provider instance to remove.
        :type name: string
        """
        available_methods = [p.METHOD for p in self.providers]
        if name not in available_methods:
            # It's not present. Fail silently.
            return

        offset = available_methods.index(name)
        self.providers.pop(offset)

    def get_provider(self, name):
        """Return a credential provider by name.

        :type name: str
        :param name: The name of the provider.

        :raises UnknownCredentialError: Raised if no credential provider by
            the provided name is found.
        """
        return self.providers[self._get_provider_offset(name)]

    def _get_provider_offset(self, name):
        try:
            return [p.METHOD for p in self.providers].index(name)
        except ValueError:
            raise UnknownCredentialError(name=name)

    def load_credentials(self):
        """
        Goes through the credentials chain, returning the first ``Credentials``
        that could be loaded.
        """
        # First provider to return a non-None response wins.
        for provider in self.providers:
            logger.debug("Looking for credentials via: %s", provider.METHOD)
            creds = provider.load()
            if creds is not None:
                return creds

        # If we got here, no credentials could be found.
        # This feels like it should be an exception, but historically, ``None``
        # is returned.
        #
        # +1
        # -js
        return None

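A sketch of composing and querying a chain by hand (EnvProvider and SharedCredentialProvider are defined earlier in this module; a Session normally assembles the chain for you):

    from botocore.credentials import (
        ContainerProvider, CredentialResolver, EnvProvider, SharedCredentialProvider,
    )

    resolver = CredentialResolver([
        EnvProvider(),
        SharedCredentialProvider('~/.aws/credentials'),
    ])
    resolver.insert_before('env', ContainerProvider())  # try the container endpoint first
    creds = resolver.load_credentials()  # first provider returning non-None wins
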
class SSOCredentialFetcher(CachedCredentialFetcher):
    _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(
        self,
        start_url,
        sso_region,
        role_name,
        account_id,
        client_creator,
        token_loader=None,
        cache=None,
        expiry_window_seconds=None,
        token_provider=None,
        sso_session_name=None,
    ):
        self._client_creator = client_creator
        self._sso_region = sso_region
        self._role_name = role_name
        self._account_id = account_id
        self._start_url = start_url
        self._token_loader = token_loader
        self._token_provider = token_provider
        self._sso_session_name = sso_session_name
        super().__init__(cache, expiry_window_seconds)

    def _create_cache_key(self):
        """Create a predictable cache key for the current configuration.

        The cache key is intended to be compatible with file names.
        """
        args = {
            'roleName': self._role_name,
            'accountId': self._account_id,
        }
        if self._sso_session_name:
            args['sessionName'] = self._sso_session_name
        else:
            args['startUrl'] = self._start_url
        # NOTE: It would be good to hoist this cache key construction logic
        # into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that subclass don't
        # pass separators, resulting in non-minified JSON. In the long term,
        # all fetchers should use the below caching scheme.
        args = json.dumps(args, sort_keys=True, separators=(',', ':'))
        argument_hash = sha1(args.encode('utf-8')).hexdigest()
        return self._make_file_safe(argument_hash)

    def _parse_timestamp(self, timestamp_ms):
        # fromtimestamp expects seconds, so: milliseconds / 1000 = seconds
        timestamp_seconds = timestamp_ms / 1000.0
        timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
        return timestamp.strftime(self._UTC_DATE_FORMAT)

    def _get_credentials(self):
        """Get credentials by calling SSO get role credentials."""
        config = Config(
            signature_version=UNSIGNED,
            region_name=self._sso_region,
        )
        client = self._client_creator('sso', config=config)
        if self._token_provider:
            initial_token_data = self._token_provider.load_token()
            token = initial_token_data.get_frozen_token().token
        else:
            token = self._token_loader(self._start_url)['accessToken']

        kwargs = {
            'roleName': self._role_name,
            'accountId': self._account_id,
            'accessToken': token,
        }
        try:
            response = client.get_role_credentials(**kwargs)
        except client.exceptions.UnauthorizedException:
            raise UnauthorizedSSOTokenError()
        credentials = response['roleCredentials']

        credentials = {
            'ProviderType': 'sso',
            'Credentials': {
                'AccessKeyId': credentials['accessKeyId'],
                'SecretAccessKey': credentials['secretAccessKey'],
                'SessionToken': credentials['sessionToken'],
                'Expiration': self._parse_timestamp(credentials['expiration']),
            },
        }
        return credentials


class SSOProvider(CredentialProvider):
    METHOD = 'sso'

    _SSO_TOKEN_CACHE_DIR = os.path.expanduser(
        os.path.join('~', '.aws', 'sso', 'cache')
    )
    _PROFILE_REQUIRED_CONFIG_VARS = (
        'sso_role_name',
        'sso_account_id',
    )
    _SSO_REQUIRED_CONFIG_VARS = (
        'sso_start_url',
        'sso_region',
    )
    _ALL_REQUIRED_CONFIG_VARS = (
        _PROFILE_REQUIRED_CONFIG_VARS + _SSO_REQUIRED_CONFIG_VARS
    )

    def __init__(
        self,
        load_config,
        client_creator,
        profile_name,
        cache=None,
        token_cache=None,
        token_provider=None,
    ):
        if token_cache is None:
            token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
        self._token_cache = token_cache
        self._token_provider = token_provider
        if cache is None:
            cache = {}
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name

    def _load_sso_config(self):
        loaded_config = self._load_config()
        profiles = loaded_config.get('profiles', {})
        profile_name = self._profile_name
        profile_config = profiles.get(self._profile_name, {})
        sso_sessions = loaded_config.get('sso_sessions', {})

        # Role name & Account ID indicate the cred provider should be used
        if all(
            c not in profile_config for c in self._PROFILE_REQUIRED_CONFIG_VARS
        ):
            return None

        resolved_config, extra_reqs = self._resolve_sso_session_reference(
            profile_config, sso_sessions
        )

        config = {}
        missing_config_vars = []
        all_required_configs = self._ALL_REQUIRED_CONFIG_VARS + extra_reqs
        for config_var in all_required_configs:
            if config_var in resolved_config:
                config[config_var] = resolved_config[config_var]
            else:
                missing_config_vars.append(config_var)

        if missing_config_vars:
            missing = ', '.join(missing_config_vars)
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" is configured to use SSO but is missing '
                    'required configuration: %s' % (profile_name, missing)
                )
            )
        return config

    def _resolve_sso_session_reference(self, profile_config, sso_sessions):
        sso_session_name = profile_config.get('sso_session')
        if sso_session_name is None:
            # No reference to resolve, proceed with legacy flow
            return profile_config, ()

        if sso_session_name not in sso_sessions:
            error_msg = f'The specified sso-session does not exist: "{sso_session_name}"'
            raise InvalidConfigError(error_msg=error_msg)

        config = profile_config.copy()
        session = sso_sessions[sso_session_name]
        for config_var, val in session.items():
            # Validate that any keys set in both the profile and the
            # sso-session have matching values
            if config.get(config_var, val) != val:
                error_msg = (
                    f"The value for {config_var} is inconsistent between "
                    f"profile ({config[config_var]}) and sso-session ({val})."
                )
                raise InvalidConfigError(error_msg=error_msg)
            config[config_var] = val
        return config, ('sso_session',)

    def load(self):
        sso_config = self._load_sso_config()
        if not sso_config:
            return None

        fetcher_kwargs = {
            'start_url': sso_config['sso_start_url'],
            'sso_region': sso_config['sso_region'],
            'role_name': sso_config['sso_role_name'],
            'account_id': sso_config['sso_account_id'],
            'client_creator': self._client_creator,
            'token_loader': SSOTokenLoader(cache=self._token_cache),
            'cache': self.cache,
        }
        if 'sso_session' in sso_config:
            fetcher_kwargs['sso_session_name'] = sso_config['sso_session']
            fetcher_kwargs['token_provider'] = self._token_provider

        sso_fetcher = SSOCredentialFetcher(**fetcher_kwargs)

        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=sso_fetcher.fetch_credentials,
        )

spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_manylinux.py DELETED
@@ -1,301 +0,0 @@
import collections
import functools
import os
import re
import struct
import sys
import warnings
from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple


# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader:
    # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
    class _InvalidELFFileHeader(ValueError):
        """
        An invalid ELF file header was found.
        """

    ELF_MAGIC_NUMBER = 0x7F454C46
    ELFCLASS32 = 1
    ELFCLASS64 = 2
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2
    EM_386 = 3
    EM_S390 = 22
    EM_ARM = 40
    EM_X86_64 = 62
    EF_ARM_ABIMASK = 0xFF000000
    EF_ARM_ABI_VER5 = 0x05000000
    EF_ARM_ABI_FLOAT_HARD = 0x00000400

    def __init__(self, file: IO[bytes]) -> None:
        def unpack(fmt: str) -> int:
            try:
                data = file.read(struct.calcsize(fmt))
                result: Tuple[int, ...] = struct.unpack(fmt, data)
            except struct.error:
                raise _ELFFileHeader._InvalidELFFileHeader()
            return result[0]

        self.e_ident_magic = unpack(">I")
        if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_class = unpack("B")
        if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_data = unpack("B")
        if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_version = unpack("B")
        self.e_ident_osabi = unpack("B")
        self.e_ident_abiversion = unpack("B")
        self.e_ident_pad = file.read(7)
        format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
        format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
        format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
        format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
        self.e_type = unpack(format_h)
        self.e_machine = unpack(format_h)
        self.e_version = unpack(format_i)
        self.e_entry = unpack(format_p)
        self.e_phoff = unpack(format_p)
        self.e_shoff = unpack(format_p)
        self.e_flags = unpack(format_i)
        self.e_ehsize = unpack(format_h)
        self.e_phentsize = unpack(format_h)
        self.e_phnum = unpack(format_h)
        self.e_shentsize = unpack(format_h)
        self.e_shnum = unpack(format_h)
        self.e_shstrndx = unpack(format_h)


def _get_elf_header() -> Optional[_ELFFileHeader]:
    try:
        with open(sys.executable, "rb") as f:
            elf_header = _ELFFileHeader(f)
    except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
        return None
    return elf_header


def _is_linux_armhf() -> bool:
    # hard-float ABI can be detected from the ELF header of the running
    # process
    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    elf_header = _get_elf_header()
    if elf_header is None:
        return False
    result = elf_header.e_ident_class == elf_header.ELFCLASS32
    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
    result &= elf_header.e_machine == elf_header.EM_ARM
    result &= (
        elf_header.e_flags & elf_header.EF_ARM_ABIMASK
    ) == elf_header.EF_ARM_ABI_VER5
    result &= (
        elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
    ) == elf_header.EF_ARM_ABI_FLOAT_HARD
    return result


def _is_linux_i686() -> bool:
    elf_header = _get_elf_header()
    if elf_header is None:
        return False
    result = elf_header.e_ident_class == elf_header.ELFCLASS32
    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
    result &= elf_header.e_machine == elf_header.EM_386
    return result


def _have_compatible_abi(arch: str) -> bool:
    if arch == "armv7l":
        return _is_linux_armhf()
    if arch == "i686":
        return _is_linux_i686()
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}


# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)


class _GLibCVersion(NamedTuple):
    major: int
    minor: int


def _glibc_version_string_confstr() -> Optional[str]:
    """
    Primary implementation of glibc_version_string using os.confstr.
    """
    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
    # to be broken or missing. This strategy is used in the standard library
    # platform module.
    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
    try:
        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
        version_string = os.confstr("CS_GNU_LIBC_VERSION")
        assert version_string is not None
        _, version = version_string.split()
    except (AssertionError, AttributeError, OSError, ValueError):
        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
        return None
    return version


def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can't proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str


def _glibc_version_string() -> Optional[str]:
    """Returns glibc version string, or None if not using glibc."""
    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()


def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
    """Parse glibc version.

    We use a regexp instead of str.split because we want to discard any
    random junk that might come after the minor version -- this might happen
    in patched/forked versions of glibc (e.g. Linaro's version of glibc
    uses version strings like "2.20-2014.11"). See gh-3588.
    """
    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if not m:
        warnings.warn(
            "Expected glibc version with 2 components major.minor,"
            " got: %s" % version_str,
            RuntimeWarning,
        )
        return -1, -1
    return int(m.group("major")), int(m.group("minor"))


@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
    version_str = _glibc_version_string()
    if version_str is None:
        return (-1, -1)
    return _parse_glibc_version(version_str)


# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux  # noqa
    except ImportError:
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        return True
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True


_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}


def platform_tags(linux: str, arch: str) -> Iterator[str]:
    if not _have_compatible_abi(arch):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 the oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions the oldest supported is (x, 0).
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)

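A sketch of driving the generator directly (packaging.tags is the usual entry point; the inputs assume an x86-64 Linux interpreter):

    for tag in platform_tags("linux_x86_64", "x86_64"):
        print(tag)  # e.g. manylinux_2_17_x86_64, then the legacy alias manylinux2014_x86_64
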
spaces/Bingsu/color_textual_inversion/LICENSE.md DELETED
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2022 Bingsu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

spaces/CALM/Dashboard/dashboard_utils/main_metrics.py DELETED
@@ -1,29 +0,0 @@
import datetime

import streamlit as st
import wandb

from dashboard_utils.time_tracker import _log, simple_time_tracker

WANDB_RUN_URL = st.secrets["WANDB_RUN_URL_MAIN_METRICS"]
CACHE_TTL = 100


@st.cache(ttl=CACHE_TTL, show_spinner=False)
@simple_time_tracker(_log)
def get_main_metrics():
    api = wandb.Api()
    run = api.run(WANDB_RUN_URL)
    history = run.scan_history(keys=["optimizer_step", "loss", "alive peers", "_timestamp"])

    steps = []
    losses = []
    alive_peers = []
    dates = []
    for row in history:
        steps.append(row["optimizer_step"])
        losses.append(row["loss"])
        alive_peers.append(row["alive peers"])
        dates.append(datetime.datetime.utcfromtimestamp(row["_timestamp"]))

    return steps, dates, losses, alive_peers

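A sketch of how the returned series might be consumed by a dashboard page; the chart calls are illustrative, not taken from this repository:

    steps, dates, losses, alive_peers = get_main_metrics()
    st.line_chart({"loss": losses})
    st.line_chart({"alive peers": alive_peers})
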
spaces/CVPR/LIVE/pybind11/tests/test_local_bindings.cpp DELETED
@@ -1,101 +0,0 @@
/*
    tests/test_local_bindings.cpp -- tests the py::module_local class feature which makes a class
    binding local to the module in which it is defined.

    Copyright (c) 2017 Jason Rhinelander <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#include "pybind11_tests.h"
#include "local_bindings.h"
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include <numeric>

TEST_SUBMODULE(local_bindings, m) {
    // test_load_external
    m.def("load_external1", [](ExternalType1 &e) { return e.i; });
    m.def("load_external2", [](ExternalType2 &e) { return e.i; });

    // test_local_bindings
    // Register a class with py::module_local:
    bind_local<LocalType, -1>(m, "LocalType", py::module_local())
        .def("get3", [](LocalType &t) { return t.i + 3; })
        ;

    m.def("local_value", [](LocalType &l) { return l.i; });

    // test_nonlocal_failure
    // The main pybind11 test module is loaded first, so this registration will succeed (the second
    // one, in pybind11_cross_module_tests.cpp, is designed to fail):
    bind_local<NonLocalType, 0>(m, "NonLocalType")
        .def(py::init<int>())
        .def("get", [](LocalType &i) { return i.i; })
        ;

    // test_duplicate_local
    // py::module_local declarations should be visible across compilation units that get linked
    // together; this tries to register a duplicate local. It depends on a definition in
    // test_class.cpp and should raise a runtime error from the duplicate definition attempt. If
    // test_class isn't available it *also* throws a runtime error (with "test_class not enabled"
    // as value).
    m.def("register_local_external", [m]() {
        auto main = py::module::import("pybind11_tests");
        if (py::hasattr(main, "class_")) {
            bind_local<LocalExternal, 7>(m, "LocalExternal", py::module_local());
        }
        else throw std::runtime_error("test_class not enabled");
    });

    // test_stl_bind_local
    // stl_bind.h binders default to py::module_local if the types are local or converting:
    py::bind_vector<LocalVec>(m, "LocalVec");
    py::bind_map<LocalMap>(m, "LocalMap");
    // and global if the type (or one of the types, for the map) is global:
    py::bind_vector<NonLocalVec>(m, "NonLocalVec");
    py::bind_map<NonLocalMap>(m, "NonLocalMap");

    // test_stl_bind_global
    // They can, however, be overridden to global using `py::module_local(false)`:
    bind_local<NonLocal2, 10>(m, "NonLocal2");
    py::bind_vector<LocalVec2>(m, "LocalVec2", py::module_local());
    py::bind_map<NonLocalMap2>(m, "NonLocalMap2", py::module_local(false));

    // test_mixed_local_global
    // We try this both with the global type registered first and vice versa (the order shouldn't
    // matter).
    m.def("register_mixed_global", [m]() {
        bind_local<MixedGlobalLocal, 100>(m, "MixedGlobalLocal", py::module_local(false));
    });
    m.def("register_mixed_local", [m]() {
        bind_local<MixedLocalGlobal, 1000>(m, "MixedLocalGlobal", py::module_local());
    });
    m.def("get_mixed_gl", [](int i) { return MixedGlobalLocal(i); });
    m.def("get_mixed_lg", [](int i) { return MixedLocalGlobal(i); });

    // test_internal_locals_differ
    m.def("local_cpp_types_addr", []() { return (uintptr_t) &py::detail::registered_local_types_cpp(); });

    // test_stl_caster_vs_stl_bind
    m.def("load_vector_via_caster", [](std::vector<int> v) {
        return std::accumulate(v.begin(), v.end(), 0);
    });

    // test_cross_module_calls
    m.def("return_self", [](LocalVec *v) { return v; });
    m.def("return_copy", [](const LocalVec &v) { return LocalVec(v); });

    class Cat : public pets::Pet { public: Cat(std::string name) : Pet(name) {}; };
    py::class_<pets::Pet>(m, "Pet", py::module_local())
        .def("get_name", &pets::Pet::name);
    // Binding for local extending class:
    py::class_<Cat, pets::Pet>(m, "Cat")
        .def(py::init<std::string>());
    m.def("pet_name", [](pets::Pet &p) { return p.name(); });

    py::class_<MixGL>(m, "MixGL").def(py::init<int>());
    m.def("get_gl_value", [](MixGL &o) { return o.i + 10; });

    py::class_<MixGL2>(m, "MixGL2").def(py::init<int>());
}

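A Python-side sketch of what py::module_local() buys: two extension modules can each register a binding named LocalType without clashing, because module-local types stay out of pybind11's global type registry. The module names follow the pybind11 test suite; the exact accessors on the second module are assumptions:

    from pybind11_tests import local_bindings as m1
    import pybind11_cross_module_tests as m2

    a = m1.LocalType(5)
    b = m2.LocalType(5)  # a distinct, independent binding that happens to share the name
    print(a.get3())      # only m1's binding defines get3()
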
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/unique_by_key.h DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits unique_by_key
#include <thrust/system/detail/sequential/unique_by_key.h>

spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/automatic_mask_generator.py DELETED
@@ -1,372 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore

from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)


class SamAutomaticMaskGenerator:
    def __init__(
        self,
        model: Sam,
        points_per_side: Optional[int] = 32,
        points_per_batch: int = 128,
        pred_iou_thresh: float = 0.88,
        stability_score_thresh: float = 0.95,
        stability_score_offset: float = 1.0,
        box_nms_thresh: float = 0.7,
        crop_n_layers: int = 0,
        crop_nms_thresh: float = 0.7,
        crop_overlap_ratio: float = 512 / 1500,
        crop_n_points_downscale_factor: int = 1,
        point_grids: Optional[List[np.ndarray]] = None,
        min_mask_region_area: int = 0,
        output_mode: str = "binary_mask",
    ) -> None:
        """
        Using a SAM model, generates masks for the entire image.
        Generates a grid of point prompts over the image, then filters
        low quality and duplicate masks. The default settings are chosen
        for SAM with a ViT-H backbone.

        Arguments:
          model (Sam): The SAM model to use for mask prediction.
          points_per_side (int or None): The number of points to be sampled
            along one side of the image. The total number of points is
            points_per_side**2. If None, 'point_grids' must provide explicit
            point sampling.
          points_per_batch (int): Sets the number of points run simultaneously
            by the model. Higher numbers may be faster but use more GPU memory.
          pred_iou_thresh (float): A filtering threshold in [0,1], using the
            model's predicted mask quality.
          stability_score_thresh (float): A filtering threshold in [0,1], using
            the stability of the mask under changes to the cutoff used to
            binarize the model's mask predictions.
          stability_score_offset (float): The amount to shift the cutoff when
            calculating the stability score.
          box_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks.
          crop_n_layers (int): If >0, mask prediction will be run again on
            crops of the image. Sets the number of layers to run, where each
            layer has 2**i_layer number of image crops.
          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks between different crops.
          crop_overlap_ratio (float): Sets the degree to which crops overlap.
            In the first crop layer, crops will overlap by this fraction of
            the image length. Later layers with more crops scale down this
            overlap.
          crop_n_points_downscale_factor (int): The number of points-per-side
            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
          point_grids (list(np.ndarray) or None): A list over explicit grids
            of points used for sampling, normalized to [0,1]. The nth grid in
            the list is used in the nth crop layer. Exclusive with
            points_per_side.
          min_mask_region_area (int): If >0, postprocessing will be applied
            to remove disconnected regions and holes in masks with area smaller
            than min_mask_region_area. Requires opencv.
          output_mode (str): The form masks are returned in. Can be 'binary_mask',
            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
            For large resolutions, 'binary_mask' may consume large amounts of
            memory.
        """

        assert (points_per_side is None) != (
            point_grids is None
        ), "Exactly one of points_per_side or point_grid must be provided."
        if points_per_side is not None:
            self.point_grids = build_all_layer_point_grids(
                points_per_side,
                crop_n_layers,
                crop_n_points_downscale_factor,
            )
        elif point_grids is not None:
            self.point_grids = point_grids
        else:
            raise ValueError("Can't have both points_per_side and point_grid be None.")

        assert output_mode in [
            "binary_mask",
            "uncompressed_rle",
            "coco_rle",
        ], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":
            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401

        if min_mask_region_area > 0:
            import cv2  # type: ignore # noqa: F401

        self.predictor = SamPredictor(model)
        self.points_per_batch = points_per_batch
        self.pred_iou_thresh = pred_iou_thresh
        self.stability_score_thresh = stability_score_thresh
        self.stability_score_offset = stability_score_offset
        self.box_nms_thresh = box_nms_thresh
        self.crop_n_layers = crop_n_layers
        self.crop_nms_thresh = crop_nms_thresh
        self.crop_overlap_ratio = crop_overlap_ratio
        self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
        self.min_mask_region_area = min_mask_region_area
        self.output_mode = output_mode

    @torch.no_grad()
    def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """
        Generates masks for the given image.

        Arguments:
          image (np.ndarray): The image to generate masks for, in HWC uint8 format.

        Returns:
          list(dict(str, any)): A list over records for masks. Each record is
            a dict containing the following keys:
              segmentation (dict(str, any) or np.ndarray): The mask. If
                output_mode='binary_mask', is an array of shape HW. Otherwise,
                is a dictionary containing the RLE.
              bbox (list(float)): The box around the mask, in XYWH format.
              area (int): The area in pixels of the mask.
              predicted_iou (float): The model's own prediction of the mask's
                quality. This is filtered by the pred_iou_thresh parameter.
              point_coords (list(list(float))): The point coordinates input
                to the model to generate this mask.
              stability_score (float): A measure of the mask's quality. This
                is filtered on using the stability_score_thresh parameter.
              crop_box (list(float)): The crop of the image used to generate
                the mask, given in XYWH format.
        """

        # Generate masks
        mask_data = self._generate_masks(image)

        # Filter small disconnected regions and holes in masks
        if self.min_mask_region_area > 0:
            mask_data = self.postprocess_small_regions(
                mask_data,
                self.min_mask_region_area,
                max(self.box_nms_thresh, self.crop_nms_thresh),
            )

        # Encode masks
        if self.output_mode == "coco_rle":
            mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
        elif self.output_mode == "binary_mask":
            mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
        else:
            mask_data["segmentations"] = mask_data["rles"]

        # Write mask records
        curr_anns = []
        for idx in range(len(mask_data["segmentations"])):
            ann = {
                "segmentation": mask_data["segmentations"][idx],
                "area": area_from_rle(mask_data["rles"][idx]),
                "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
                "predicted_iou": mask_data["iou_preds"][idx].item(),
                "point_coords": [mask_data["points"][idx].tolist()],
                "stability_score": mask_data["stability_score"][idx].item(),
                "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
            }
            curr_anns.append(ann)

        return curr_anns

    def _generate_masks(self, image: np.ndarray) -> MaskData:
        orig_size = image.shape[:2]
        crop_boxes, layer_idxs = generate_crop_boxes(
            orig_size, self.crop_n_layers, self.crop_overlap_ratio
        )

        # Iterate over image crops
        data = MaskData()
        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
            data.cat(crop_data)

        # Remove duplicate masks between crops
        if len(crop_boxes) > 1:
            # Prefer masks from smaller crops
            scores = 1 / box_area(data["crop_boxes"])
            scores = scores.to(data["boxes"].device)
            keep_by_nms = batched_nms(
                data["boxes"].float(),
                scores,
                torch.zeros(len(data["boxes"])),  # categories
                iou_threshold=self.crop_nms_thresh,
            )
            data.filter(keep_by_nms)

        data.to_numpy()
        return data

    def _process_crop(
        self,
        image: np.ndarray,
        crop_box: List[int],
        crop_layer_idx: int,
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        # Crop the image and calculate embeddings
        x0, y0, x1, y1 = crop_box
        cropped_im = image[y0:y1, x0:x1, :]
        cropped_im_size = cropped_im.shape[:2]
        self.predictor.set_image(cropped_im)

        # Get points for this crop
        points_scale = np.array(cropped_im_size)[None, ::-1]
        points_for_image = self.point_grids[crop_layer_idx] * points_scale

        # Generate masks for this crop in batches
        data = MaskData()
        for (points,) in batch_iterator(self.points_per_batch, points_for_image):
            batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)
            data.cat(batch_data)
            del batch_data
        self.predictor.reset_image()

        # Remove duplicates within this crop.
        keep_by_nms = batched_nms(
            data["boxes"].float(),
            data["iou_preds"],
            torch.zeros(len(data["boxes"])),  # categories
            iou_threshold=self.box_nms_thresh,
        )
        data.filter(keep_by_nms)

        # Return to the original image frame
        data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
        data["points"] = uncrop_points(data["points"], crop_box)
        data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])

        return data

    def _process_batch(
        self,
        points: np.ndarray,
        im_size: Tuple[int, ...],
        crop_box: List[int],
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        orig_h, orig_w = orig_size

        # Run model on this batch
        transformed_points = self.predictor.transform.apply_coords(points, im_size)
        in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
        in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
        masks, iou_preds, _ = self.predictor.predict_torch(
            in_points[:, None, :],
            in_labels[:, None],
            multimask_output=True,
            return_logits=True,
        )

        # Serialize predictions and store in MaskData
        data = MaskData(
            masks=masks.flatten(0, 1),
            iou_preds=iou_preds.flatten(0, 1),
            points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
        )
        del masks

        # Filter by predicted IoU
        if self.pred_iou_thresh > 0.0:
            keep_mask = data["iou_preds"] > self.pred_iou_thresh
            data.filter(keep_mask)

        # Calculate stability score
        data["stability_score"] = calculate_stability_score(
            data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
        )
        if self.stability_score_thresh > 0.0:
            keep_mask = data["stability_score"] >= self.stability_score_thresh
            data.filter(keep_mask)

        # Threshold masks and calculate boxes
        data["masks"] = data["masks"] > self.predictor.model.mask_threshold
        data["boxes"] = batched_mask_to_box(data["masks"])

        # Filter boxes that touch crop boundaries
        keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
        if not torch.all(keep_mask):
            data.filter(keep_mask)

        # Compress to RLE
        data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
        data["rles"] = mask_to_rle_pytorch(data["masks"])
        del data["masks"]

        return data

    @staticmethod
    def postprocess_small_regions(
        mask_data: MaskData, min_area: int, nms_thresh: float
    ) -> MaskData:
        """
        Removes small disconnected regions and holes in masks, then reruns
        box NMS to remove any new duplicates.

        Edits mask_data in place.

        Requires open-cv as a dependency.
        """
        if len(mask_data["rles"]) == 0:
            return mask_data

        # Filter small disconnected regions and holes
        new_masks = []
        scores = []
        for rle in mask_data["rles"]:
            mask = rle_to_mask(rle)

            mask, changed = remove_small_regions(mask, min_area, mode="holes")
            unchanged = not changed
            mask, changed = remove_small_regions(mask, min_area, mode="islands")
            unchanged = unchanged and not changed

            new_masks.append(torch.as_tensor(mask).unsqueeze(0))
            # Give score=0 to changed masks and score=1 to unchanged masks
            # so NMS will prefer ones that didn't need postprocessing
            scores.append(float(unchanged))

        # Recalculate boxes and remove any new duplicates
        masks = torch.cat(new_masks, dim=0)
        boxes = batched_mask_to_box(masks)
        keep_by_nms = batched_nms(
            boxes.float(),
            torch.as_tensor(scores),
            torch.zeros(len(boxes)),  # categories
            iou_threshold=nms_thresh,
        )

        # Only recalculate RLEs for masks that have changed
        for i_mask in keep_by_nms:
            if scores[i_mask] == 0.0:
                mask_torch = masks[i_mask].unsqueeze(0)
                mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
                mask_data["boxes"][i_mask] = boxes[i_mask]  # update res directly
        mask_data.filter(keep_by_nms)

        return mask_data

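A minimal usage sketch; the checkpoint path is an assumption, while sam_model_registry ships in the same segment_anything package:

    import cv2
    from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

    sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")  # path is an assumption
    generator = SamAutomaticMaskGenerator(sam, pred_iou_thresh=0.9)
    image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
    masks = generator.generate(image)  # list of dicts: 'segmentation', 'bbox', 'area', ...
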
spaces/Catmeow/AI_story_writing/app.py DELETED
@@ -1,44 +0,0 @@
import gradio as gr
from transformers import pipeline

title = "Story Generator"

# gpt-neo-2.7B gpt-j-6B

def generate(text, the_model, max_length, temperature, repetition_penalty):
    generator = pipeline('text-generation', model=the_model)
    result = generator(text, num_return_sequences=3,
                       max_length=max_length,
                       temperature=temperature,
                       repetition_penalty=repetition_penalty,
                       no_repeat_ngram_size=2, early_stopping=False)
    return result[0]["generated_text"], result[1]["generated_text"], result[2]["generated_text"]


def complete_with_gpt(text, context, the_model, max_length, temperature, repetition_penalty):
    # Use the last [context] characters of the text as the prompt
    max_length = max_length + context
    return generate(text[-context:], the_model, max_length, temperature, repetition_penalty)


def send(text1, context, text2):
    if len(text1) < context:
        return text1 + text2[len(text1):]
    else:
        return text1 + text2[context:]


with gr.Blocks() as demo:
    textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
    btn = gr.Button("Generate")
    context = gr.Slider(value=200, label="Truncate input text length (AI's memory)", minimum=1, maximum=500)
    the_model = gr.Dropdown(choices=['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl', 'EleutherAI/gpt-neo-2.7B', 'EleutherAI/gpt-j-6B'], value='gpt2', label="Choose model")
    max_length = gr.Slider(value=20, label="Max Generate Length", minimum=1, maximum=50)
    temperature = gr.Slider(value=0.9, label="Temperature", minimum=0.0, maximum=1.0, step=0.05)
    repetition_penalty = gr.Slider(value=1.5, label="Repetition penalty", minimum=0.2, maximum=2, step=0.1)
    output1 = gr.Textbox(lines=4, label='1')
    send1 = gr.Button("Send1 to Origin Textbox").click(send, inputs=[textbox, context, output1], outputs=textbox)
    output2 = gr.Textbox(lines=4, label='2')
    send2 = gr.Button("Send2 to Origin Textbox").click(send, inputs=[textbox, context, output2], outputs=textbox)
    output3 = gr.Textbox(lines=4, label='3')
    send3 = gr.Button("Send3 to Origin Textbox").click(send, inputs=[textbox, context, output3], outputs=textbox)
    btn.click(complete_with_gpt, inputs=[textbox, context, the_model, max_length, temperature, repetition_penalty], outputs=[output1, output2, output3])

demo.launch()

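A worked example of the send() stitching above: with context=5, generate() only saw the last five characters of text1, so the continuation text2 repeats that five-character prompt and send() drops the overlap before appending:

    text1 = "Once upon"
    text2 = " upon a time"        # generate() echoes the truncated prompt
    print(send(text1, 5, text2))  # -> "Once upon a time"
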
spaces/ClueAI/ChatYuan-large-v2/README.md DELETED
@@ -1,13 +0,0 @@
---
title: ChatYuan Large V2
emoji: 📊
colorFrom: red
colorTo: pink
sdk: gradio
sdk_version: 3.23.0
app_file: app.py
pinned: false
license: creativeml-openrail-m
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Cropinky/esrgan/realesrgan/archs/__init__.py DELETED
@@ -1,10 +0,0 @@
import importlib
from basicsr.utils import scandir
from os import path as osp

# automatically scan and import arch modules for registry
# scan all the files that end with '_arch.py' under the archs folder
arch_folder = osp.dirname(osp.abspath(__file__))
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
# import all the arch modules
_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]

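A sketch of the convention this loader enables: drop a file whose name ends in _arch.py into this folder (my_custom_arch.py below is hypothetical), register the class with BasicSR's ARCH_REGISTRY, and it becomes constructible by name without touching this __init__:

    # realesrgan/archs/my_custom_arch.py (hypothetical)
    import torch.nn as nn
    from basicsr.utils.registry import ARCH_REGISTRY

    @ARCH_REGISTRY.register()
    class MyCustomArch(nn.Module):
        def __init__(self, num_feat=64):
            super().__init__()
            self.body = nn.Conv2d(3, num_feat, 3, 1, 1)

        def forward(self, x):
            return self.body(x)
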
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/cldm/logger.py DELETED
@@ -1,76 +0,0 @@
- import os
-
- import numpy as np
- import torch
- import torchvision
- from PIL import Image
- from pytorch_lightning.callbacks import Callback
- from pytorch_lightning.utilities.distributed import rank_zero_only
-
-
- class ImageLogger(Callback):
-     def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True,
-                  rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
-                  log_images_kwargs=None):
-         super().__init__()
-         self.rescale = rescale
-         self.batch_freq = batch_frequency
-         self.max_images = max_images
-         if not increase_log_steps:
-             self.log_steps = [self.batch_freq]
-         self.clamp = clamp
-         self.disabled = disabled
-         self.log_on_batch_idx = log_on_batch_idx
-         self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
-         self.log_first_step = log_first_step
-
-     @rank_zero_only
-     def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
-         root = os.path.join(save_dir, "image_log", split)
-         for k in images:
-             grid = torchvision.utils.make_grid(images[k], nrow=4)
-             if self.rescale:
-                 grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
-             grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
-             grid = grid.numpy()
-             grid = (grid * 255).astype(np.uint8)
-             filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx)
-             path = os.path.join(root, filename)
-             os.makedirs(os.path.split(path)[0], exist_ok=True)
-             Image.fromarray(grid).save(path)
-
-     def log_img(self, pl_module, batch, batch_idx, split="train"):
-         check_idx = batch_idx  # if self.log_on_batch_idx else pl_module.global_step
-         if (self.check_frequency(check_idx) and  # batch_idx % self.batch_freq == 0
-                 hasattr(pl_module, "log_images") and
-                 callable(pl_module.log_images) and
-                 self.max_images > 0):
-             logger = type(pl_module.logger)
-
-             is_train = pl_module.training
-             if is_train:
-                 pl_module.eval()
-
-             with torch.no_grad():
-                 images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
-
-             for k in images:
-                 N = min(images[k].shape[0], self.max_images)
-                 images[k] = images[k][:N]
-                 if isinstance(images[k], torch.Tensor):
-                     images[k] = images[k].detach().cpu()
-                     if self.clamp:
-                         images[k] = torch.clamp(images[k], -1., 1.)
-
-             self.log_local(pl_module.logger.save_dir, split, images,
-                            pl_module.global_step, pl_module.current_epoch, batch_idx)
-
-             if is_train:
-                 pl_module.train()
-
-     def check_frequency(self, check_idx):
-         return check_idx % self.batch_freq == 0
-
-     def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
-         if not self.disabled:
-             self.log_img(pl_module, batch, batch_idx, split="train")
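
`ImageLogger` is a PyTorch Lightning callback: every `batch_frequency` training batches it calls the module's own `log_images()` hook, clamps and rescales the returned tensors, and writes PNG grids under `<save_dir>/image_log/<split>/`. A minimal usage sketch, assuming a LightningModule that implements `log_images(batch, split=..., **kwargs)` and returns a dict of image tensors:

    import pytorch_lightning as pl
    from cldm.logger import ImageLogger

    image_logger = ImageLogger(batch_frequency=500, max_images=2)
    trainer = pl.Trainer(callbacks=[image_logger])
    # trainer.fit(model, train_dataloader)  # grids land in <save_dir>/image_log/train/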
 
 
spaces/DQChoi/gpt-demo/venv/bin/Activate.ps1 DELETED
@@ -1,247 +0,0 @@
- <#
- .Synopsis
- Activate a Python virtual environment for the current PowerShell session.
-
- .Description
- Pushes the python executable for a virtual environment to the front of the
- $Env:PATH environment variable and sets the prompt to signify that you are
- in a Python virtual environment. Makes use of the command line switches as
- well as the `pyvenv.cfg` file values present in the virtual environment.
-
- .Parameter VenvDir
- Path to the directory that contains the virtual environment to activate. The
- default value for this is the parent of the directory that the Activate.ps1
- script is located within.
-
- .Parameter Prompt
- The prompt prefix to display when this virtual environment is activated. By
- default, this prompt is the name of the virtual environment folder (VenvDir)
- surrounded by parentheses and followed by a single space (ie. '(.venv) ').
-
- .Example
- Activate.ps1
- Activates the Python virtual environment that contains the Activate.ps1 script.
-
- .Example
- Activate.ps1 -Verbose
- Activates the Python virtual environment that contains the Activate.ps1 script,
- and shows extra information about the activation as it executes.
-
- .Example
- Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
- Activates the Python virtual environment located in the specified location.
-
- .Example
- Activate.ps1 -Prompt "MyPython"
- Activates the Python virtual environment that contains the Activate.ps1 script,
- and prefixes the current prompt with the specified string (surrounded in
- parentheses) while the virtual environment is active.
-
- .Notes
- On Windows, it may be required to enable this Activate.ps1 script by setting the
- execution policy for the user. You can do this by issuing the following PowerShell
- command:
-
- PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
-
- For more information on Execution Policies:
- https://go.microsoft.com/fwlink/?LinkID=135170
-
- #>
- Param(
-     [Parameter(Mandatory = $false)]
-     [String]
-     $VenvDir,
-     [Parameter(Mandatory = $false)]
-     [String]
-     $Prompt
- )
-
- <# Function declarations --------------------------------------------------- #>
-
- <#
- .Synopsis
- Remove all shell session elements added by the Activate script, including the
- addition of the virtual environment's Python executable from the beginning of
- the PATH variable.
-
- .Parameter NonDestructive
- If present, do not remove this function from the global namespace for the
- session.
-
- #>
- function global:deactivate ([switch]$NonDestructive) {
-     # Revert to original values
-
-     # The prior prompt:
-     if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
-         Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
-         Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
-     }
-
-     # The prior PYTHONHOME:
-     if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
-         Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
-         Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
-     }
-
-     # The prior PATH:
-     if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
-         Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
-         Remove-Item -Path Env:_OLD_VIRTUAL_PATH
-     }
-
-     # Just remove the VIRTUAL_ENV altogether:
-     if (Test-Path -Path Env:VIRTUAL_ENV) {
-         Remove-Item -Path env:VIRTUAL_ENV
-     }
-
-     # Just remove VIRTUAL_ENV_PROMPT altogether.
-     if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
-         Remove-Item -Path env:VIRTUAL_ENV_PROMPT
-     }
-
-     # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
-     if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
-         Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
-     }
-
-     # Leave deactivate function in the global namespace if requested:
-     if (-not $NonDestructive) {
-         Remove-Item -Path function:deactivate
-     }
- }
-
- <#
- .Description
- Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
- given folder, and returns them in a map.
-
- For each line in the pyvenv.cfg file, if that line can be parsed into exactly
- two strings separated by `=` (with any amount of whitespace surrounding the =)
- then it is considered a `key = value` line. The left hand string is the key,
- the right hand is the value.
-
- If the value starts with a `'` or a `"` then the first and last character is
- stripped from the value before being captured.
-
- .Parameter ConfigDir
- Path to the directory that contains the `pyvenv.cfg` file.
- #>
- function Get-PyVenvConfig(
-     [String]
-     $ConfigDir
- ) {
-     Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
-
-     # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
-     $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
-
-     # An empty map will be returned if no config file is found.
-     $pyvenvConfig = @{ }
-
-     if ($pyvenvConfigPath) {
-
-         Write-Verbose "File exists, parse `key = value` lines"
-         $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
-
-         $pyvenvConfigContent | ForEach-Object {
-             $keyval = $PSItem -split "\s*=\s*", 2
-             if ($keyval[0] -and $keyval[1]) {
-                 $val = $keyval[1]
-
-                 # Remove extraneous quotations around a string value.
-                 if ("'""".Contains($val.Substring(0, 1))) {
-                     $val = $val.Substring(1, $val.Length - 2)
-                 }
-
-                 $pyvenvConfig[$keyval[0]] = $val
-                 Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
-             }
-         }
-     }
-     return $pyvenvConfig
- }
-
-
- <# Begin Activate script --------------------------------------------------- #>
-
- # Determine the containing directory of this script
- $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
- $VenvExecDir = Get-Item -Path $VenvExecPath
-
- Write-Verbose "Activation script is located in path: '$VenvExecPath'"
- Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
- Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
-
- # Set values required in priority: CmdLine, ConfigFile, Default
- # First, get the location of the virtual environment, it might not be
- # VenvExecDir if specified on the command line.
- if ($VenvDir) {
-     Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
- }
- else {
-     Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
-     $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
-     Write-Verbose "VenvDir=$VenvDir"
- }
-
- # Next, read the `pyvenv.cfg` file to determine any required value such
- # as `prompt`.
- $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
-
- # Next, set the prompt from the command line, or the config file, or
- # just use the name of the virtual environment folder.
- if ($Prompt) {
-     Write-Verbose "Prompt specified as argument, using '$Prompt'"
- }
- else {
-     Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
-     if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
-         Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
-         $Prompt = $pyvenvCfg['prompt'];
-     }
-     else {
-         Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
-         Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
-         $Prompt = Split-Path -Path $venvDir -Leaf
-     }
- }
-
- Write-Verbose "Prompt = '$Prompt'"
- Write-Verbose "VenvDir='$VenvDir'"
-
- # Deactivate any currently active virtual environment, but leave the
- # deactivate function in place.
- deactivate -nondestructive
-
- # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
- # that there is an activated venv.
- $env:VIRTUAL_ENV = $VenvDir
-
- if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
-
-     Write-Verbose "Setting prompt to '$Prompt'"
-
-     # Set the prompt to include the env name
-     # Make sure _OLD_VIRTUAL_PROMPT is global
-     function global:_OLD_VIRTUAL_PROMPT { "" }
-     Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
-     New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
-
-     function global:prompt {
-         Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
-         _OLD_VIRTUAL_PROMPT
-     }
-     $env:VIRTUAL_ENV_PROMPT = $Prompt
- }
-
- # Clear PYTHONHOME
- if (Test-Path -Path Env:PYTHONHOME) {
-     Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
-     Remove-Item -Path Env:PYTHONHOME
- }
-
- # Add the venv to the PATH
- Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
- $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
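
The parsing rules documented in `Get-PyVenvConfig` (split each line on the first `=`, trim whitespace, strip one enclosing quote pair, skip lines missing a key or a value) are straightforward to mirror outside PowerShell. A rough Python equivalent, for illustration only:

    import os

    def read_pyvenv_cfg(config_dir):
        """Parse `key = value` lines from pyvenv.cfg into a dict."""
        config = {}
        path = os.path.join(config_dir, "pyvenv.cfg")
        if not os.path.exists(path):
            return config  # mirror the script: empty map when no file is found
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                key, sep, value = line.partition("=")
                key, value = key.strip(), value.strip()
                if not (sep and key and value):
                    continue
                if value[0] in "'\"":
                    value = value[1:-1]  # strip the surrounding quote pair
                config[key] = value
        return config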
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/CurImagePlugin.py DELETED
@@ -1,75 +0,0 @@
- #
- # The Python Imaging Library.
- # $Id$
- #
- # Windows Cursor support for PIL
- #
- # notes:
- #     uses BmpImagePlugin.py to read the bitmap data.
- #
- # history:
- #     96-05-27 fl Created
- #
- # Copyright (c) Secret Labs AB 1997.
- # Copyright (c) Fredrik Lundh 1996.
- #
- # See the README file for information on usage and redistribution.
- #
- from . import BmpImagePlugin, Image
- from ._binary import i16le as i16
- from ._binary import i32le as i32
-
- #
- # --------------------------------------------------------------------
-
-
- def _accept(prefix):
-     return prefix[:4] == b"\0\0\2\0"
-
-
- ##
- # Image plugin for Windows Cursor files.
-
-
- class CurImageFile(BmpImagePlugin.BmpImageFile):
-     format = "CUR"
-     format_description = "Windows Cursor"
-
-     def _open(self):
-         offset = self.fp.tell()
-
-         # check magic
-         s = self.fp.read(6)
-         if not _accept(s):
-             msg = "not a CUR file"
-             raise SyntaxError(msg)
-
-         # pick the largest cursor in the file
-         m = b""
-         for i in range(i16(s, 4)):
-             s = self.fp.read(16)
-             if not m:
-                 m = s
-             elif s[0] > m[0] and s[1] > m[1]:
-                 m = s
-         if not m:
-             msg = "No cursors were found"
-             raise TypeError(msg)
-
-         # load as bitmap
-         self._bitmap(i32(m, 12) + offset)
-
-         # patch up the bitmap height
-         self._size = self.size[0], self.size[1] // 2
-         d, e, o, a = self.tile[0]
-         self.tile[0] = d, (0, 0) + self.size, o, a
-
-         return
-
-
- #
- # --------------------------------------------------------------------
-
- Image.register_open(CurImageFile.format, CurImageFile, _accept)
-
- Image.register_extension(CurImageFile.format, ".cur")
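
Because the module ends by calling `Image.register_open` and `Image.register_extension`, Pillow dispatches on the six-byte CUR magic without the caller ever naming the plugin. A short usage sketch; the file names are illustrative:

    from PIL import Image

    with Image.open("pointer.cur") as cur:       # hypothetical input file
        print(cur.format, cur.size)              # "CUR", size of the largest cursor
        cur.convert("RGBA").save("pointer.png")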
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-aef3869a.css DELETED
@@ -1 +0,0 @@
- td.svelte-xrr240.svelte-xrr240{width:45%}td.svelte-xrr240.svelte-xrr240:last-child{width:10%;text-align:right}.file-preview-holder.svelte-xrr240.svelte-xrr240{overflow-x:auto}.file-preview.svelte-xrr240.svelte-xrr240{width:var(--size-full);max-height:var(--size-60);overflow-y:auto;color:var(--body-text-color)}.file.svelte-xrr240.svelte-xrr240{width:var(--size-full)}.file.svelte-xrr240>.svelte-xrr240{padding:var(--size-1) var(--size-2-5)}.download.svelte-xrr240.svelte-xrr240:hover{text-decoration:underline}.download.svelte-xrr240>a.svelte-xrr240{color:var(--link-text-color)}.download.svelte-xrr240>a.svelte-xrr240:hover{color:var(--link-text-color-hover)}.download.svelte-xrr240>a.svelte-xrr240:visited{color:var(--link-text-color-visited)}.download.svelte-xrr240>a.svelte-xrr240:active{color:var(--link-text-color-active)}.selectable.svelte-xrr240.svelte-xrr240{cursor:pointer}
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/user.py DELETED
@@ -1,191 +0,0 @@
- # Copyright 2020 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import subprocess
- from argparse import _SubParsersAction
-
- from requests.exceptions import HTTPError
-
- from huggingface_hub.commands import BaseHuggingfaceCLICommand
- from huggingface_hub.constants import (
-     ENDPOINT,
-     REPO_TYPES,
-     REPO_TYPES_URL_PREFIXES,
-     SPACES_SDK_TYPES,
- )
- from huggingface_hub.hf_api import HfApi
-
- from .._login import (  # noqa: F401 # for backward compatibility
-     NOTEBOOK_LOGIN_PASSWORD_HTML,
-     NOTEBOOK_LOGIN_TOKEN_HTML_END,
-     NOTEBOOK_LOGIN_TOKEN_HTML_START,
-     login,
-     logout,
-     notebook_login,
- )
- from ..utils import HfFolder
- from ._cli_utils import ANSI
-
-
- class UserCommands(BaseHuggingfaceCLICommand):
-     @staticmethod
-     def register_subcommand(parser: _SubParsersAction):
-         login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens")
-         login_parser.add_argument(
-             "--token",
-             type=str,
-             help="Token generated from https://huggingface.co/settings/tokens",
-         )
-         login_parser.add_argument(
-             "--add-to-git-credential",
-             action="store_true",
-             help="Optional: Save token to git credential helper.",
-         )
-         login_parser.set_defaults(func=lambda args: LoginCommand(args))
-         whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
-         whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
-         logout_parser = parser.add_parser("logout", help="Log out")
-         logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
-
-         # new system: git-based repo system
-         repo_parser = parser.add_parser(
-             "repo",
-             help="{create, ls-files} Commands to interact with your huggingface.co repos.",
-         )
-         repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")
-         repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")
-         repo_create_parser.add_argument(
-             "name",
-             type=str,
-             help="Name for your repo. Will be namespaced under your username to build the repo id.",
-         )
-         repo_create_parser.add_argument(
-             "--type",
-             type=str,
-             help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
-         )
-         repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
-         repo_create_parser.add_argument(
-             "--space_sdk",
-             type=str,
-             help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
-             choices=SPACES_SDK_TYPES,
-         )
-         repo_create_parser.add_argument(
-             "-y",
-             "--yes",
-             action="store_true",
-             help="Optional: answer Yes to the prompt",
-         )
-         repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
-
-
- class BaseUserCommand:
-     def __init__(self, args):
-         self.args = args
-         self._api = HfApi()
-
-
- class LoginCommand(BaseUserCommand):
-     def run(self):
-         login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential)
-
-
- class LogoutCommand(BaseUserCommand):
-     def run(self):
-         logout()
-
-
- class WhoamiCommand(BaseUserCommand):
-     def run(self):
-         token = HfFolder.get_token()
-         if token is None:
-             print("Not logged in")
-             exit()
-         try:
-             info = self._api.whoami(token)
-             print(info["name"])
-             orgs = [org["name"] for org in info["orgs"]]
-             if orgs:
-                 print(ANSI.bold("orgs: "), ",".join(orgs))
-
-             if ENDPOINT != "https://huggingface.co":
-                 print(f"Authenticated through private endpoint: {ENDPOINT}")
-         except HTTPError as e:
-             print(e)
-             print(ANSI.red(e.response.text))
-             exit(1)
-
-
- class RepoCreateCommand(BaseUserCommand):
-     def run(self):
-         token = HfFolder.get_token()
-         if token is None:
-             print("Not logged in")
-             exit(1)
-         try:
-             stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
-             print(ANSI.gray(stdout.strip()))
-         except FileNotFoundError:
-             print("Looks like you do not have git installed, please install.")
-
-         try:
-             stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
-             print(ANSI.gray(stdout.strip()))
-         except FileNotFoundError:
-             print(
-                 ANSI.red(
-                     "Looks like you do not have git-lfs installed, please install."
-                     " You can install from https://git-lfs.github.com/."
-                     " Then run `git lfs install` (you only have to do this once)."
-                 )
-             )
-         print("")
-
-         user = self._api.whoami(token)["name"]
-         namespace = self.args.organization if self.args.organization is not None else user
-
-         repo_id = f"{namespace}/{self.args.name}"
-
-         if self.args.type not in REPO_TYPES:
-             print("Invalid repo --type")
-             exit(1)
-
-         if self.args.type in REPO_TYPES_URL_PREFIXES:
-             prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
-         else:
-             prefixed_repo_id = repo_id
-
-         print(f"You are about to create {ANSI.bold(prefixed_repo_id)}")
-
-         if not self.args.yes:
-             choice = input("Proceed? [Y/n] ").lower()
-             if not (choice == "" or choice == "y" or choice == "yes"):
-                 print("Abort")
-                 exit()
-         try:
-             url = self._api.create_repo(
-                 repo_id=repo_id,
-                 token=token,
-                 repo_type=self.args.type,
-                 space_sdk=self.args.space_sdk,
-             )
-         except HTTPError as e:
-             print(e)
-             print(ANSI.red(e.response.text))
-             exit(1)
-         print("\nYour repo now lives at:")
-         print(f"  {ANSI.bold(url)}")
-         print("\nYou can clone it locally with the command below, and commit/push as usual.")
-         print(f"\n  git clone {url}")
-         print("")
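
`RepoCreateCommand` is essentially an interactive wrapper around `HfApi.create_repo`: it resolves the namespace, sanity-checks git and git-lfs, asks for confirmation, then delegates. The programmatic equivalent is short; the repo id and SDK below are illustrative:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token saved by `huggingface-cli login`
    url = api.create_repo(repo_id="my-user/my-demo", repo_type="space", space_sdk="gradio")
    print(url)  # e.g. https://huggingface.co/spaces/my-user/my-demo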