parquet-converter committed on
Commit 34a19be · 1 Parent(s): a985852

Update parquet files (step 72 of 397)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. spaces/17TheWord/RealESRGAN/tests/test_utils.py +0 -87
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cisco Packet Tracer Internet Cloud ((HOT)).md +0 -42
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FS2004 - Wilco Feelthere CRJ Retail CD - SERIAL Needed ! TOP Download.md +0 -244
  4. spaces/1phancelerku/anime-remove-background/Download Stumble Guys APK Mod 0.39 and Enjoy Unlimited Money and Unlocked Features.md +0 -78
  5. spaces/1phancelerku/anime-remove-background/Download the Coolest and Trendiest mp3 Ringtones with Ringtone Download 3.md +0 -94
  6. spaces/1phancelerku/anime-remove-background/Enjoy Pixel Demolish Mod APK with Unlimited Money and Gear - No Root Required.md +0 -101
  7. spaces/AI-Hobbyist/Hoyo-RVC/infer/trans_weights.py +0 -16
  8. spaces/AI4PD/hexviz/tests/test_models.py +0 -15
  9. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/hifigan/__init__.py +0 -7
  10. spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/aishell3_no_tone/preprocess.py +0 -31
  11. spaces/AILab-CVC/SEED-LLaMA/start.py +0 -11
  12. spaces/AP123/dreamgaussian/index.html +0 -25
  13. spaces/ASJMO/freegpt/client/css/global.css +0 -70
  14. spaces/ASJMO/freegpt/client/css/select.css +0 -35
  15. spaces/Abhaykoul/HelpingAI-T3/README.md +0 -11
  16. spaces/AchyuthGamer/OpenGPT/g4f/README.md +0 -5
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvasinput/Factory.d.ts +0 -7
  18. spaces/Agusbs98/automatic-ecg-diagnosis/data.py +0 -45
  19. spaces/AkitoP/umamusume_bert_vits2/preprocess_text.py +0 -107
  20. spaces/AliHaider0343/Restaurant-Domain-Sentence-Categories-Classification/README.md +0 -12
  21. spaces/Aloento/9Nine-VITS/hparams.py +0 -42
  22. spaces/Alycer/VITS-Umamusume-voice-synthesizer/losses.py +0 -61
  23. spaces/Amrrs/DragGan-Inversion/stylegan_human/training/__init__.py +0 -9
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py +0 -473
  25. spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py +0 -59
  26. spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py +0 -5
  27. spaces/AngoHF/ANGO-Leaderboard/README.md +0 -13
  28. spaces/AngoHF/ANGO-Leaderboard/assets/content.py +0 -163
  29. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/nn.py +0 -170
  30. spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/api.py +0 -169
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +0 -62
  32. spaces/AsakuraMizu/moe-tts/models.py +0 -549
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_legacy.py +0 -102
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/core.py +0 -400
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/ansi.py +0 -240
  36. spaces/AutoLLM/AutoAgents/autoagents/agents/__init__.py +0 -0
  37. spaces/Awesimo/jojogan/e4e/utils/model_utils.py +0 -35
  38. spaces/BLACKHOST/Banner/README.md +0 -12
  39. spaces/Benson/text-generation/Examples/Aethersx2 2023 Apk.md +0 -188
  40. spaces/Benson/text-generation/Examples/Camioneros De Europa 3 Mod Apk Dinero Ilimitado Ios.md +0 -56
  41. spaces/Benson/text-generation/Examples/Descargar Fifa Mobile Ftbol Mod Apk Dinero Ilimitado.md +0 -52
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/main.py +0 -79
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/uninstall.py +0 -113
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py +0 -698
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/easy_install.py +0 -2312
  46. spaces/BreadBytes1/PL-Dashboard/app.py +0 -992
  47. spaces/BwayKC/darkstorm2150-Protogen_v2.2_Official_Release/README.md +0 -14
  48. spaces/CAMP-ViL/Xplainer/article.md +0 -31
  49. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/README.md +0 -56
  50. spaces/CVPR/Text2Human/Text2Human/models/archs/unet_arch.py +0 -693
spaces/17TheWord/RealESRGAN/tests/test_utils.py DELETED
@@ -1,87 +0,0 @@
- import numpy as np
- from basicsr.archs.rrdbnet_arch import RRDBNet
-
- from realesrgan.utils import RealESRGANer
-
-
- def test_realesrganer():
-     # initialize with default model
-     restorer = RealESRGANer(
-         scale=4,
-         model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',
-         model=None,
-         tile=10,
-         tile_pad=10,
-         pre_pad=2,
-         half=False)
-     assert isinstance(restorer.model, RRDBNet)
-     assert restorer.half is False
-     # initialize with user-defined model
-     model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
-     restorer = RealESRGANer(
-         scale=4,
-         model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth',
-         model=model,
-         tile=10,
-         tile_pad=10,
-         pre_pad=2,
-         half=True)
-     # test attribute
-     assert isinstance(restorer.model, RRDBNet)
-     assert restorer.half is True
-
-     # ------------------ test pre_process ---------------- #
-     img = np.random.random((12, 12, 3)).astype(np.float32)
-     restorer.pre_process(img)
-     assert restorer.img.shape == (1, 3, 14, 14)
-     # with modcrop
-     restorer.scale = 1
-     restorer.pre_process(img)
-     assert restorer.img.shape == (1, 3, 16, 16)
-
-     # ------------------ test process ---------------- #
-     restorer.process()
-     assert restorer.output.shape == (1, 3, 64, 64)
-
-     # ------------------ test post_process ---------------- #
-     restorer.mod_scale = 4
-     output = restorer.post_process()
-     assert output.shape == (1, 3, 60, 60)
-
-     # ------------------ test tile_process ---------------- #
-     restorer.scale = 4
-     img = np.random.random((12, 12, 3)).astype(np.float32)
-     restorer.pre_process(img)
-     restorer.tile_process()
-     assert restorer.output.shape == (1, 3, 64, 64)
-
-     # ------------------ test enhance ---------------- #
-     img = np.random.random((12, 12, 3)).astype(np.float32)
-     result = restorer.enhance(img, outscale=2)
-     assert result[0].shape == (24, 24, 3)
-     assert result[1] == 'RGB'
-
-     # ------------------ test enhance with 16-bit image---------------- #
-     img = np.random.random((4, 4, 3)).astype(np.uint16) + 512
-     result = restorer.enhance(img, outscale=2)
-     assert result[0].shape == (8, 8, 3)
-     assert result[1] == 'RGB'
-
-     # ------------------ test enhance with gray image---------------- #
-     img = np.random.random((4, 4)).astype(np.float32)
-     result = restorer.enhance(img, outscale=2)
-     assert result[0].shape == (8, 8)
-     assert result[1] == 'L'
-
-     # ------------------ test enhance with RGBA---------------- #
-     img = np.random.random((4, 4, 4)).astype(np.float32)
-     result = restorer.enhance(img, outscale=2)
-     assert result[0].shape == (8, 8, 4)
-     assert result[1] == 'RGBA'
-
-     # ------------------ test enhance with RGBA, alpha_upsampler---------------- #
-     restorer.tile_size = 0
-     img = np.random.random((4, 4, 4)).astype(np.float32)
-     result = restorer.enhance(img, outscale=2, alpha_upsampler=None)
-     assert result[0].shape == (8, 8, 4)
-     assert result[1] == 'RGBA'
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cisco Packet Tracer Internet Cloud ((HOT)).md DELETED
@@ -1,42 +0,0 @@
- <br />
- <h1>How to Use Cisco Packet Tracer Internet Cloud for Network Simulation</h1>
- <p>Cisco Packet Tracer is a network simulation and visualization tool that allows you to create and test various network scenarios. One of the features of Cisco Packet Tracer is the Internet Cloud, which can be used to emulate the Internet or other networks that are not directly accessible from your local network. In this article, we will show you how to use Cisco Packet Tracer Internet Cloud for network simulation and what are the benefits and limitations of this feature.</p>
- <h2>cisco packet tracer internet cloud</h2><br /><p><b><b>Download</b> &#10001; &#10001; &#10001; <a href="https://byltly.com/2uKzFH">https://byltly.com/2uKzFH</a></b></p><br /><br />
- <h2>What is Cisco Packet Tracer Internet Cloud?</h2>
- <p>Cisco Packet Tracer Internet Cloud is a device that can be added to your network topology in Cisco Packet Tracer. It has two main functions: DSL and PT-Cloud.</p>
- <ul>
- <li>The DSL function allows you to connect your network devices to a DSL modem, which can then communicate with the Internet Cloud. You can configure the DSL settings, such as username, password, and encapsulation type, on the Internet Cloud device.</li>
- <li>The PT-Cloud function allows you to create custom routes between different network segments that are connected to the Internet Cloud. You can specify the source and destination IP addresses and subnet masks for each route on the Internet Cloud device.</li>
- </ul>
- <p>By using these functions, you can simulate various network scenarios that involve the Internet or other networks that are not directly connected to your local network. For example, you can create a VPN tunnel between two routers that are separated by the Internet Cloud, or you can test the connectivity and performance of your network devices over different network paths.</p>
- <h2>How to Use Cisco Packet Tracer Internet Cloud?</h2>
- <p>To use Cisco Packet Tracer Internet Cloud for network simulation, you need to follow these steps:</p>
- <ol>
- <li>Open Cisco Packet Tracer and create a new network topology or open an existing one.</li>
- <li>Drag and drop the Internet Cloud device from the End Devices section to your workspace.</li>
- <li>Connect your network devices to the Internet Cloud device using copper straight-through cables or fiber optic cables. You can use any of the eight ports on the Internet Cloud device.</li>
- <li>Double-click on the Internet Cloud device to open its configuration window.</li>
- <li>Select the DSL tab and configure the DSL settings for each port that is connected to a DSL modem. You can specify the username, password, encapsulation type, and service name for each port. You can also enable or disable NAT on each port.</li>
- <li>Select the PT-Cloud tab and configure the custom routes for each network segment that is connected to the Internet Cloud. You can specify the source and destination IP addresses and subnet masks for each route. You can also enable or disable ICMP on each route.</li>
- <li>Click OK to save your configuration and close the window.</li>
- <li>Test your network simulation by using ping, traceroute, or other commands on your network devices. You should be able to communicate with other devices that are connected to the Internet Cloud according to your configuration.</li>
- </ol>
- <h2>What are the Benefits and Limitations of Cisco Packet Tracer Internet Cloud?</h2>
- <p>Cisco Packet Tracer Internet Cloud has some benefits and limitations that you should be aware of before using it for network simulation. Here are some of them:</p>
- <p></p>
- <ul>
- <li>The benefits of Cisco Packet Tracer Internet Cloud are:
- <ul>
- <li>It allows you to simulate various network scenarios that involve the Internet or other networks that are not directly accessible from your local network.</li>
- <li>It gives you more control over the network parameters and conditions that affect your network simulation.</li>
- <li>It helps you to learn and practice networking concepts and skills in a realistic and interactive way.</li>
- </ul>
- </li>
- <li>The limitations of Cisco Packet Tracer Internet Cloud are:
- <ul>
- <li>It does not support some popular file formats, such as MP4, MOV, and MKV. You may need to convert your files to other formats before using them in your network simulation.</li>
- <li>It does not have built-in codecs for these file formats, which means that you may need to install additional codecs on your computer to play them.</li>
- <li>It does not have advanced features that are available in real networking devices or software, such as livestream integration, NDI support, alpha channel output, etc.</li>
- <li>It may</p> ddb901b051<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FS2004 - Wilco Feelthere CRJ Retail CD - SERIAL Needed ! TOP Download.md DELETED
@@ -1,244 +0,0 @@
-
- <table>
- <tr>
- <h1>FS2004 - Wilco Feelthere CRJ Retail CD - SERIAL Needed ! Download</h1></td>
- </tr>
- <tr>
- <td><p>If you are a fan of flight simulation games, you probably know about FS2004 or Microsoft Flight Simulator 2004: A Century of Flight. It is one of the most popular and realistic flight simulators ever created. But did you know that you can enhance your flying experience with add-ons that provide new aircraft models, scenery, sounds, and more? One of the best add-ons for FS2004 is the CRJ Nextgen by Wilco Publishing and FeelThere. It is a package that includes three variants of the CRJ regional jet: CRJ-700, CRJ-900, and CRJ-1000. In this article, we will tell you everything you need to know about this add-on, why you need a serial number to use it, and where you can download it from. Let's get started!</p>
- <h2>FS2004 - Wilco Feelthere CRJ Retail CD - SERIAL Needed ! Download</h2><br /><p><b><b>Download</b> ->>> <a href="https://byltly.com/2uKxDh">https://byltly.com/2uKxDh</a></b></p><br /><br /></td>
- </tr>
- <tr>
- <td><h2>What is FS2004?</h2></td>
- </tr>
- <tr>
- <td><p>FS2004 or Microsoft Flight Simulator 2004: A Century of Flight is a flight simulation game developed by Microsoft and released in 2003. It is the tenth installment in the Microsoft Flight Simulator series and the last one to run on Windows 98 and Windows Me. It is also the first one to include a dynamic weather system, interactive air traffic control, and 3D virtual cockpits for some aircraft.</p>
- <p>FS2004 covers the entire world with over 24,000 airports, 33 cities, and 45 detailed regions. It also features over 70 aircraft, ranging from historical planes like the Wright Flyer and the Spirit of St. Louis, to modern jets like the Boeing 747 and the Concorde. It also allows users to create and share their own custom aircraft, scenery, missions, and more.</p>
- <p>FS2004 is widely regarded as one of the best and most realistic flight simulators ever made. It has received many awards and accolades from critics and fans alike. It has also spawned a large and active community of flight simulation enthusiasts who continue to enjoy and improve the game with various add-ons and modifications.</p>
- <p></p></td>
- </tr>
- <tr>
- <td><h2>What is Wilco Feelthere CRJ?</h2></td>
- </tr>
- <tr>
- <td><p>Wilco Feelthere CRJ or CRJ Nextgen is an add-on for FS2004 that provides three variants of the CRJ regional jet: CRJ-700, CRJ-900, and CRJ-1000. The CRJ or Canadair Regional Jet is a family of twin-engine, single-aisle jet airliners designed and manufactured by Bombardier Aerospace. It is one of the most successful and widely used regional jets in the world, with over 2,000 units delivered to more than 100 operators in over 50 countries.</p>
- <p>The add-on was developed by Wilco Publishing and FeelThere, two leading companies in the flight simulation industry. Wilco Publishing is a French company that specializes in creating high-quality add-ons for various flight simulators, such as Airbus Series, Boeing Series, ERJ Series, etc. FeelThere is a Hungarian company that focuses on developing realistic and complex aircraft systems, such as Embraer Phenom 100, Embraer E-Jets Series, etc. </p>
- <p>The add-on is compatible with FS2004 and offers a high level of realism and immersion for users who want to fly the CRJ aircraft. It features high-definition models, interactive virtual cockpits, realistic flight management computers, immersive audio experience, and more.</p></td>
- </tr>
- <tr>
- <td><h3>Features of Wilco Feelthere CRJ</h3></td>
- </tr>
- <tr>
- <td><p>The add-on offers many features that enhance the flying experience of the CRJ aircraft. Some of the main features are:</p>
- <ul>
- <li><b>High-definition models:</b> The add-on includes three highly detailed models of the CRJ aircraft: CRJ-700 (70 seats), CRJ-900 (90 seats), and CRJ-1000 (100 seats). Each model has accurate dimensions, shapes, textures, liveries, animations, lighting effects, etc.</li>
- <li><b>Interactive virtual cockpit:</b> The add-on provides a fully functional virtual cockpit for each model of the CRJ aircraft. The virtual cockpit has realistic gauges, displays, switches, buttons, knobs, levers, etc. that can be operated with the mouse or keyboard. The virtual cockpit also has a head-up display (HUD), a weather radar (WX), a traffic collision avoidance system (TCAS), etc.</li>
- <li><b>Realistic flight management computer:</b> The add-on includes a realistic flight management computer (FMC) for each model of the CRJ aircraft. The FMC is a device that helps pilots plan and execute flights by providing information such as route data, fuel calculations, performance data, etc. The FMC can be programmed with waypoints, airways, sid, stars, etc. The FMC can also be updated with real-time data from online sources, such as Navigraph or NavDataPro.</li>
- <li><b>Immersive audio experience:</b> The add-on delivers a high-quality audio experience for each model of the CRJ aircraft. The audio includes realistic engine sounds, cockpit sounds, cabin sounds, environmental sounds, etc. The audio also supports 3D sound positioning and spatialization, as well as dynamic sound effects based on speed, altitude, weather, etc.</li>
- </ul></td>
- </tr>
- <tr>
- <td><h3>Specifications of Wilco Feelthere CRJ</h3></td>
- </tr>
- <tr>
- <td><p>The add-on provides accurate and detailed specifications for each model of the CRJ aircraft. The specifications include dimensions, weights, capacities, performance, range, etc. The specifications are based on the official data from Bombardier Aerospace and can be compared in the following table:</p>
- <table>
- <tr>
- <th>Specification</th>
- <th>CRJ-700</th>
- <th>CRJ-900</th>
- <th>CRJ-1000</th>
- </tr>
- <tr>
- <td>Length</td>
- <td>32.51 m (106 ft 8 in)</td>
- <td>36.40 m (119 ft 4 in)</td>
- <td>39.13 m (128 ft 4 in)</td>
- </tr>
- <tr>
- <td>Wingspan</td>
- <td>23.24 m (76 ft 3 in)</td>
- <td>24.85 m (81 ft 6 in)</td>
- <td>26.16 m (85 ft 10 in)</td>
- </tr>
- <tr>
- <td>Height</td>
- <td>7.57 m (24 ft 10 in)</td>
- <td>7.51 m (24 ft 7 in)</td>
- <td>7.51 m (24 ft 7 in)</td>
- </tr>
- <tr>
- <td>Maximum takeoff weight</td>
- <td>32,999 kg (72,750 lb)</td>
- <td>38,330 kg (84,500 lb)</td>
- <td>41,640 kg (91,800 lb)</td>
- </tr>
- <tr>
- <td>Fuel capacity</td>
- <td>9,480 L (2,504 US gal)</td>
- <td>9,480 L (2,504 US gal)</td>
- <td>9,480 L (2,504 US gal)</td>
- </tr>
- <tr>
- <td>Passengers</td>
- <td>70 (standard), 78 (maximum)</td>
- <td>90 (standard), 100 (maximum)</td>
- <td>100 (standard), 104 (maximum)</td>
- </tr>
- <tr>
- <td>Cruise speed</td>
- <td>Mach 0.78 (829 km/h; 515 mph)</td>
- <td>Mach 0.78 (829 km/h; 515 mph)</td>
- <td>Mach 0.78 (829 km/h; 515 mph)</td> </tr>
- <tr>
- <td>Range</td>
- <td>3,148 km (1,700 nmi)</td>
- <td>3,385 km (1,828 nmi)</td>
- <td>3,057 km (1,650 nmi)</td>
- </tr>
- <tr>
- <td>Engines</td>
- <td>2 × General Electric CF34-8C1</td>
- <td>2 × General Electric CF34-8C5</td>
- <td>2 × General Electric CF34-8C5A1</td>
- </tr>
- <tr>
- <td>Thrust</td>
- <td>56.4 kN (12,670 lbf) each</td>
- <td>62.3 kN (14,000 lbf) each</td>
- <td>63.4 kN (14,255 lbf) each</td>
- </tr>
- </table></td>
- </tr>
- <tr>
- <td><h3>Compatibility of Wilco Feelthere CRJ</h3></td>
- </tr>
- <tr>
- <td><p>The add-on is compatible with FS2004 and can be installed and run on any computer that meets the minimum system requirements for the game. The add-on is also compatible with other third-party software and hardware that enhance the flight simulation experience, such as:</p>
- <ul>
- <li><b>VRinsight modules:</b> The add-on supports the use of VRinsight modules, such as the CDU II panel, the MCP Combo panel, the Flight Master Yoke II, etc. These modules are hardware devices that provide realistic controls and displays for the CRJ aircraft.</li>
- <li><b>Go Flight modules:</b> The add-on supports the use of Go Flight modules, such as the GF-MCP Pro panel, the GF-P8 push button module, the GF-T8 toggle switch module, etc. These modules are hardware devices that provide additional switches and buttons for the CRJ aircraft.</li>
- <li><b>Track IR:</b> The add-on supports the use of Track IR, a device that tracks the head movements of the user and translates them into corresponding movements of the virtual camera in the game. This allows the user to look around the cockpit and outside the aircraft in a natural and intuitive way.</li>
- </ul></p></td>
- </tr>
- <tr>
- <td><h2>Why do you need a serial for Wilco Feelthere CRJ?</h2></td>
- </tr>
- <tr>
- <td><p>A serial number is a unique code that is used to activate and register the add-on. The serial number is usually provided by the seller or distributor of the add-on when you purchase it. The serial number is required for two reasons:</p>
- <ul>
- <li><b>To verify your purchase:</b> The serial number is used to verify that you have purchased a legitimate copy of the add-on from an authorized source. This helps to prevent piracy and fraud.</li>
- <li><b>To unlock all features:</b> The serial number is used to unlock all features and functions of the add-on. Without a valid serial number, you will not be able to use some features of the add-on, such as online activation, updates, support, etc.</li>
- </ul>
- <p>If you do not have a valid serial number for Wilco Feelthere CRJ, you will not be able to enjoy the full potential of the add-on. You will also risk violating the terms and conditions of use and facing legal consequences.</p></td>
- </tr> <tr>
- <td><h2>Where can you download Wilco Feelthere CRJ?</h2></td>
- </tr>
- <tr>
- <td><p>There are different sources and methods for downloading Wilco Feelthere CRJ for FS2004. Some of them are official and legal, while others are unofficial and illegal. The choice is yours, but we recommend that you always download from a trusted and authorized source to avoid any problems or risks. Here are some of the options for downloading Wilco Feelthere CRJ:</p></td>
- </tr>
- <tr>
- <td><h3>Official website</h3></td>
- </tr>
- <tr>
- <td><p>The best and safest way to download Wilco Feelthere CRJ is from the official website of Wilco Publishing or FeelThere. You can find the add-on on their online catalog and purchase it with a secure payment method, such as credit card, PayPal, etc. The price of the add-on is €29.95 (about $34) for the download version or €34.95 (about $40) for the boxed version.</p>
- <p>After purchasing the add-on, you will receive an email with a download link and a serial number. You can then download the add-on as a ZIP file (about 500 MB) and extract it to your FS2004 folder. You will also need to activate the add-on with your serial number using an online or offline method.</p>
- <p>The advantages of downloading from the official website are:</p>
- <ul>
- <li><b>Quality and reliability:</b> You can be sure that you are getting a high-quality and reliable product that has been tested and approved by the developers.</li>
- <li><b>Support and updates:</b> You can get access to technical support and customer service from the developers in case you have any issues or questions. You can also get free updates and patches for the add-on when they are available.</li>
- <li><b>Legality and ethics:</b> You can respect the intellectual property rights and hard work of the developers by paying for their product. You can also avoid any legal troubles or penalties that may arise from using pirated or illegal copies of the add-on.</li>
- </ul></td>
- </tr>
- <tr>
- <td><h3>Online stores</h3></td>
- </tr>
- <tr>
- <td><p>Another way to download Wilco Feelthere CRJ is from other online stores that sell flight simulation products, such as SimMarket, FlightSim.com, Aerosoft, etc. These online stores are authorized resellers of Wilco Publishing and FeelThere products and offer similar prices and payment methods as the official website.</p>
- <p>After purchasing the add-on from an online store, you will receive an email with a download link and a serial number. You can then download the add-on as a ZIP file (about 500 MB) and extract it to your FS2004 folder. You will also need to activate the add-on with your serial number using an online or offline method.</p>
- <p>The advantages of downloading from an online store are:</p>
- <ul>
- <li><b>Variety and convenience:</b> You can choose from a wide range of flight simulation products and compare prices and features among different online stores. You can also find discounts and deals on some products.</li>
- <li><b>Security and trust:</b> You can trust that you are getting a legitimate and safe product from a reputable and verified online store. You can also use secure payment methods and encryption technologies to protect your personal and financial information.</li>
- <li><b>Legality and ethics:</b> You can respect the intellectual property rights and hard work of the developers by paying for their product. You can also avoid any legal troubles or penalties that may arise from using pirated or illegal copies of the add-on.</li>
- </ul></td>
- </tr> <tr>
- <td><h3>Torrent sites</h3></td>
- </tr>
- <tr>
- <td><p>A third way to download Wilco Feelthere CRJ is from torrent sites that offer free or pirated copies of flight simulation products, such as The Pirate Bay, Kickass Torrents, RARBG, etc. These torrent sites are not authorized or endorsed by Wilco Publishing or FeelThere and offer illegal downloads of their products.</p>
- <p>After downloading the add-on from a torrent site, you will get a ZIP file (about 500 MB) that contains the add-on files and a crack or keygen program. You will need to extract the add-on files to your FS2004 folder and run the crack or keygen program to generate a serial number and activate the add-on.</p>
- <p>The disadvantages of downloading from a torrent site are:</p>
- <ul>
- <li><b>Quality and reliability:</b> You cannot be sure that you are getting a high-quality and reliable product that has not been tampered with or infected with malware. You may also encounter errors, bugs, or crashes while using the add-on.</li>
- <li><b>Support and updates:</b> You cannot get access to technical support and customer service from the developers in case you have any issues or questions. You also cannot get free updates and patches for the add-on when they are available.</li>
- <li><b>Legality and ethics:</b> You are violating the intellectual property rights and hard work of the developers by downloading their product without paying for it. You are also risking legal troubles or penalties that may arise from using pirated or illegal copies of the add-on.</li>
- </ul></td>
- </tr>
- <tr>
- <td><h2>How to install and activate Wilco Feelthere CRJ?</h2></td>
- </tr>
- <tr>
- <td><p>After downloading Wilco Feelthere CRJ from any source, you will need to install and activate it before you can use it. The installation and activation process is simple and straightforward, but it may vary depending on the source of your download. Here are the steps for installing and activating Wilco Feelthere CRJ:</p></td>
- </tr>
- <tr>
- <td><h3>How to install Wilco Feelthere CRJ?</h3></td>
- </tr>
- <tr>
- <td><p>The installation process depends on whether you have downloaded the add-on as an installation program or a ZIP file. Here are the steps for both methods:</p>
- <ul>
- <li><b>Installation program:</b> If you have downloaded the add-on as an installation program (usually named Setup.exe), you just need to double-click on it and follow the instructions on the screen. You will need to select your FS2004 folder as the destination folder for the add-on files. You will also need to agree to the terms and conditions of use and enter your name and email address.</li>
- <li><b>ZIP file:</b> If you have downloaded the add-on as a ZIP file (usually named CRJ_NextGen_FS2004.zip), you will need to extract it using a ZIP file extractor, such as WinZip, WinRAR, 7-Zip, etc. You will need to extract the add-on files to your FS2004 folder. You will also need to agree to the terms and conditions of use and enter your name and email address.</li>
- </ul>
- <p>After installing the add-on, you will see a new folder named "FeelThere" in your FS2004 folder. This folder contains all the files and folders related to the add-on, such as aircraft, gauges, manuals, sounds, etc.</p></td>
- </tr>
- <tr>
- <td><h3>How to activate Wilco Feelthere CRJ?</h3></td>
- </tr>
- <tr>
- <td><p>The activation process depends on whether you have downloaded the add-on from an official or unofficial source. Here are the steps for both methods:</p>
- <ul>
- <li><b>Official source:</b> If you have downloaded the add-on from an official source, such as the official website or an online store, you will need to activate it with your serial number using an online or offline method. Here are the steps for both methods:</li>
- <ul>
- <li><b>Online method:</b> If you have an internet connection, you can activate the add-on online by running the "Wilco Activation Tool" program that is located in your FS2004 folder. You will need to enter your serial number and click on "Activate". The program will connect to the activation server and verify your serial number. If your serial number is valid, you will see a message saying "Activation successful". You can then close the program and start FS2004.</li>
- <li><b>Offline method:</b> If you do not have an internet connection, you can activate the add-on offline by running the "Wilco Activation Tool" program that is located in your FS2004 folder. You will need to enter your serial number and click on "Generate". The program will generate an activation code that you will need to write down or copy. You will then need to go to the activation website (https://www.wilcopub.com/activation) on another device that has an internet connection. You will need to enter your serial number and the activation code and click on "Activate". The website will verify your serial number and activation code. If they are valid, you will see a message saying "Activation successful". You can then close the website and start FS2004.</li>
- </ul>
- <li><b>Unofficial source:</b> If you have downloaded the add-on from an unofficial source, such as a torrent site, you will need to activate it with a crack or keygen program that is included in the download. Here are the steps for using the crack or keygen program:</li>
- <ul>
- <li><b>Crack program:</b> If you have a crack program (usually named CRJ_NextGen_FS2004_Crack.exe), you just need to run it and click on "Crack". The program will automatically copy and replace some files in your FS2004 folder. You will see a message saying "Crack successful". You can then close the program and start FS2004.</li>
- <li><b>Keygen program:</b> If you have a keygen program (usually named CRJ_NextGen_FS2004_Keygen.exe), you just need to run it and click on "Generate". The program will generate a serial number that you will need to write down or copy. You will then need to run the "Wilco Activation Tool" program that is located in your FS2004 folder. You will need to enter the serial number and click on "Activate". The program will connect to the activation server and verify your serial number. If your serial number is valid, you will see a message saying "Activation successful". You can then close the program and start FS2004.</li>
- </ul>
- </ul>
- <p>After activating the add-on, you will be able to use all features and functions of Wilco Feelthere CRJ for FS2004.</p></td>
- </tr>
- <tr>
- <td><h2>Conclusion</h2></td>
- </tr>
- <tr>
- <td><p>Wilco Feelthere CRJ is an amazing add-on for FS2004 that provides three variants of the CRJ regional jet: CRJ-700, CRJ-900, and CRJ-1000. It offers a high level of realism and immersion for users who want to fly the CRJ aircraft. It features high-definition models, interactive virtual cockpits, realistic flight management computers, immersive audio experience, and more. It also supports other third-party software and hardware that enhance the flight simulation experience, such as VRinsight modules, Go Flight modules, Track IR, etc.</p>
- <p>To download Wilco Feelthere CRJ, you have different options: official website, online stores, or torrent sites. We recommend that you always download from a trusted and authorized source to avoid any problems or risks. To install and activate Wilco Feelthere CRJ, you just need to follow some simple steps depending on the source of your download.</p>
- <p>We hope that this article has helped you learn more about Wilco Feelthere CRJ for FS2004 and how to download, install, and activate it. If you have any questions or comments, please feel free to contact us or leave a comment below. Happy flying!</p></td>
- </tr>
- <tr>
- <td><h2>FAQs</h2></td>
- </tr>
- <tr>
- <td><p>Here are some frequently asked questions and answers about Wilco Feelthere CRJ for FS2004:</p>
- <ul>
- <li><b>Q: Can I use Wilco Feelthere CRJ with other flight simulators?</b></li>
- <li><b>A: No, Wilco Feelthere CRJ is only compatible with FS2004. However, there are other versions of Wilco Feelthere CRJ for other flight simulators, such as FSX, P3D, etc.</b></li>
- <li><b>Q: Can I use Wilco Feelthere CRJ with other add-ons?</b></li>
- <li><b>A: Yes, Wilco Feelthere CRJ is compatible with most other add-ons for FS2004, such as scenery, weather, traffic, etc. However, some add-ons may cause conflicts or errors with Wilco Feelthere CRJ. In that case, you may need to adjust some settings or disable some add-ons.</b></li>
- <li><b>Q: How can I update Wilco Feelthere CRJ?</b></li>
- <li><b>A: If you have downloaded Wilco Feelthere CRJ from an official source, you can get free updates and patches for the add-on when they are available. You can check for updates on the official website of Wilco Publishing or FeelThere, or on the online store where you purchased the add-on. You can then download and install the updates following the instructions provided.</b></li>
- <li><b>Q: How can I get support for Wilco Feelthere CRJ?</b></li>
- <li><b>A: If you have downloaded Wilco Feelthere CRJ from an official source, you can get technical support and customer service from the developers. You can contact them by email, phone, or online form. You can also visit their forums and FAQs for more information and help.</b></li>
- <li><b>Q: How can I uninstall Wilco Feelthere CRJ?</b></li>
- <li><b>A: If you want to uninstall Wilco Feelthere CRJ, you can use the uninstall program that is located in your FS2004 folder. You just need to run the program and follow the instructions on the screen. You will also need to deactivate the add-on with your serial number using the "Wilco Activation Tool" program.</b></li>
- </ul></td>
- </tr>
- <tr>
- <td></td>
- </tr>
- </table></p> b2dd77e56b<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Stumble Guys APK Mod 0.39 and Enjoy Unlimited Money and Unlocked Features.md DELETED
@@ -1,78 +0,0 @@
-
- <h1>Download Stumble Guys APK Mod 0.39: A Fun and Wacky Multiplayer Game</h1>
- <p>If you are looking for a fun and wacky multiplayer game that will make you laugh and scream, then you should try Stumble Guys. Stumble Guys is a hilarious online game where you have to compete with up to 32 players in various obstacle courses and challenges. You have to run, jump, slide, and dodge your way to the finish line, while avoiding being eliminated by other players or the environment. Sounds easy, right? Well, not so fast. The game is full of surprises and twists that will keep you on your toes and test your skills and reflexes.</p>
- <h2>download stumble guys apk mod 0.39</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://jinyurl.com/2uNTqo">https://jinyurl.com/2uNTqo</a></b></p><br /><br />
- <h2>What is Stumble Guys?</h2>
- <p>Stumble Guys is a multiplayer game developed by Kitka Games and released in August 2020. It is inspired by popular TV shows like Wipeout and Takeshi's Castle, where contestants have to go through crazy and funny obstacle courses. The game has a colorful and cartoonish graphics style, with cute and customizable characters that you can dress up with different outfits and accessories. The game also has a catchy and upbeat soundtrack that matches the mood of the game.</p>
- <h3>Features of Stumble Guys</h3>
- <p>Stumble Guys has many features that make it a fun and addictive game to play with your friends or strangers online. Some of these features are:</p>
- <ul>
- <li><b>Online multiplayer mode:</b> You can join or create a room with up to 32 players and compete in various rounds of obstacle courses and mini-games. You can also chat with other players and make new friends.</li>
- <li><b>Random and dynamic levels:</b> The game has over 20 different levels that are randomly selected and change every time you play. You will never get bored or know what to expect next.</li>
- <li><b>Creative and challenging obstacles:</b> The game has a variety of obstacles that will challenge your skills and reflexes. You will have to deal with spinning platforms, swinging hammers, slippery slides, bouncing balls, flying fruits, and more.</li>
- <li><b>Cute and customizable characters:</b> You can choose from different characters and customize them with different outfits and accessories. You can also unlock more items as you play and level up.</li>
- </ul>
- <h4>How to play Stumble Guys</h4>
- <p>The gameplay of Stumble Guys is simple and intuitive. You just have to use the virtual joystick to move your character and the jump button to jump over obstacles or gaps. You have to reach the finish line before the time runs out or before you get eliminated by other players or the environment. You can also push or grab other players to slow them down or knock them off the course. The last player standing wins the game.</p>
- <h2>Why download Stumble Guys APK Mod 0.39?</h2>
- <p>If you want to enjoy Stumble Guys even more, then you should download the APK mod version 0.39 of the game. This version has some advantages over the original version that will make your gaming experience more fun and satisfying.</p>
- <h3>Benefits of Stumble Guys APK Mod 0.39</h3>
- <p>The benefits of downloading Stumble Guys APK mod 0.39 are:</p>
- <p>How to download stumble guys mod apk 0.39 for free<br />
- Stumble guys multiplayer royale mod apk 0.39 unlocked<br />
- Download stumble guys latest version mod apk 0.39<br />
- Stumble guys hack mod apk 0.39 unlimited gems<br />
- Stumble guys mod apk 0.39 android download<br />
- Stumble guys mod apk 0.39 no root required<br />
- Stumble guys mod apk 0.39 online gameplay<br />
- Stumble guys mod apk 0.39 features and review<br />
- Stumble guys mod apk 0.39 safe and secure download<br />
- Stumble guys mod apk 0.39 installation guide<br />
- Download stumble guys mod apk 0.39 from happymod.com[^1^]<br />
- Stumble guys mod apk 0.39 direct download link<br />
- Stumble guys mod apk 0.39 update and changelog<br />
- Stumble guys mod apk 0.39 best settings and tips<br />
- Stumble guys mod apk 0.39 compatible devices and requirements<br />
- Download stumble guys mod apk 0.39 with obb file<br />
- Stumble guys mod apk 0.39 offline mode available<br />
- Stumble guys mod apk 0.39 new maps and costumes<br />
- Stumble guys mod apk 0.39 premium access unlocked<br />
- Stumble guys mod apk 0.39 bug fixes and improvements<br />
- Download stumble guys mod apk 0.39 from apkpure.com<br />
- Stumble guys mod apk 0.39 fast and easy download<br />
- Stumble guys mod apk 0.39 full version download<br />
- Stumble guys mod apk 0.39 fun and addictive game<br />
- Stumble guys mod apk 0.39 support and feedback</p>
- <ul>
- <li><b>All skins unlocked:</b> You can access all the skins in the game without having to spend coins or gems. You can dress up your character with any outfit or accessory you want.</li>
- <li><b>No ads:</b> You can play the game without being interrupted by annoying ads that pop up every time you finish a round or level up.</li>
- <li><b>No root required:</b> You don't need to root your device to install the APK mod version of the game. You just need to enable unknown sources in your settings and follow the installation steps below.</li>
- </ul>
- <h4>How to download and install St <h4>How to download and install Stumble Guys APK Mod 0.39</h4>
- <p>If you want to download and install Stumble Guys APK mod 0.39 on your Android device, you just have to follow these simple steps:</p>
- <ol>
- <li>Click on the download button below to download the APK file of the game.</li>
- <li>Go to your device settings and enable unknown sources. This will allow you to install apps from sources other than the Google Play Store.</li>
- <li>Locate the downloaded APK file in your file manager and tap on it to start the installation process.</li>
- <li>Follow the instructions on the screen and wait for the installation to complete.</li>
- <li>Launch the game and enjoy playing Stumble Guys with all skins unlocked and no ads.</li>
- </ol>
- <p><b>Download Stumble Guys APK Mod 0.39</b></p>
- <h2>Conclusion</h2>
- <p>Stumble Guys is a fun and wacky multiplayer game that will make you laugh and scream as you compete with other players in various obstacle courses and challenges. You can customize your character with different skins and accessories, and play with up to 32 players online. You can also download the APK mod version of the game to unlock all skins, remove ads, and install it without root. If you are looking for a game that will keep you entertained and amused, then you should try Stumble Guys today.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Stumble Guys and its APK mod version:</p>
- <ul>
- <li><b>Q: Is Stumble Guys free to play?</b></li>
- <li>A: Yes, Stumble Guys is free to play. However, it has some in-app purchases that allow you to buy coins or gems to unlock more skins or items in the game.</li>
- <li><b>Q: Is Stumble Guys safe to play?</b></li>
- <li>A: Yes, Stumble Guys is safe to play. It does not contain any viruses or malware that can harm your device or data. However, you should always download the game from a trusted source like the Google Play Store or our website.</li>
- <li><b>Q: Is Stumble Guys compatible with my device?</b></li>
- <li>A: Stumble Guys is compatible with most Android devices that have Android 5.0 or higher. However, some older devices may experience some lag or performance issues due to the high graphics and animation of the game.</li>
- <li><b>Q: How can I contact the developers of Stumble Guys?</b></li>
- <li>A: You can contact the developers of Stumble Guys by sending them an email at [email protected] or by visiting their website at https://www.kitkagames.com/.</li>
- <li><b>Q: How can I update Stumble Guys APK Mod 0.39?</b></li>
- <li>A: You can update Stumble Guys APK Mod 0.39 by visiting our website regularly and downloading the latest version of the game. You can also enable notifications on our website to get notified when a new update is available.</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download the Coolest and Trendiest mp3 Ringtones with Ringtone Download 3.md DELETED
@@ -1,94 +0,0 @@
- <br />
- <h1>Ringtone Download 3: How to Get the Best Ringtones for Your Phone</h1>
- <p>Do you want to spice up your phone with some cool and trendy ringtones? Do you want to express your mood and personality with your ringtone? Do you want to have access to thousands of ringtones in different categories and genres? If you answered yes to any of these questions, then you need to check out ringtone download 3, the ultimate destination for all your ringtone needs.</p>
- <h2>Introduction</h2>
- <h3>What are ringtones and why do they matter?</h3>
- <p>Ringtones are the sounds that your phone makes when someone calls you or when you receive a notification. They are an important part of your phone's customization and personalization, as they can make your phone stand out from the crowd and reflect your taste and preferences. Ringtones can also help you identify who is calling you without looking at your phone, or set different tones for different contacts or groups. Ringtones can also be a fun way to express yourself and have some fun with your phone.</p>
- <h2>ringtone download 3</h2><br /><p><b><b>Download Zip</b> &#10040;&#10040;&#10040; <a href="https://jinyurl.com/2uNSCZ">https://jinyurl.com/2uNSCZ</a></b></p><br /><br />
- <h3>What is ringtone download 3 and how does it work?</h3>
- <p>Ringtone download 3 is a website and an app that allows you to download free mp3 ringtones for your mobile phones. It has a huge collection of ringtones uploaded by users and shared by other users. You can choose from over 52900 ringtones uploaded under various categories, such as Hindi, Tamil, Devotional, Music, Name, iPhone, etc. You can also upload your own ringtones and share them with others. Ringtone download 3 works by letting you listen to the preview of the ringtones and then download them with a simple click. You don't need to sign up or register to use ringtone download 3, and you can download as many ringtones as you want.</p>
- <h2>Benefits of using ringtone download 3</h2>
- <h3>Access to a wide range of ringtones in different categories and genres</h3>
- <p>One of the main benefits of using ringtone download 3 is that you can access a wide range of ringtones in different categories and genres. Whether you are looking for Bollywood songs, Hollywood movies, pop music, classical music, instrumental music, devotional songs, baby sounds, animal sounds, funny sounds, or anything else, you can find it on ringtone download 3. You can also search for ringtones by keywords or browse through the popular or recent categories. You can also find ringtones that suit your mood, occasion, or personality.</p>
- <h3>Easy and fast download process with no sign up or registration required</h3>
- <p>Another benefit of using ringtone download 3 is that it has an easy and fast download process with no sign up or registration required. You don't need to create an account or provide any personal information to use ringtone download 3. You just need to visit the website or app, select the ringtones you want, and click on the download button. The ringtones will be saved to your phone in mp3 format, which is compatible with all types of phones. The download process is fast and smooth, and you can get your ringtones in seconds.</p>
- <h3>High-quality sound and compatibility with all types of phones</h3>
- <p>A third benefit of using ringtone download 3 is that it offers high-quality sound and compatibility with all types of phones. The ringtones on ringtone download 3 are in mp3 format, which is a common and widely used audio format that delivers clear and crisp sound. The ringtones are also compatible with all types of phones, whether they are Android, iOS, Windows, or any other operating system. You don't need to worry about the format or the size of the ringtones, as they will work on any phone.</p>
- <h2>Tips for choosing the best ringtone for your phone</h2>
- <h3>Set a ringtone that reflects your personality and style</h3>
- <p>One of the tips for choosing the best ringtone for your phone is to set a ringtone that reflects your personality and style. Your ringtone is a way of expressing yourself and showing your taste and preferences. You can choose a ringtone that matches your mood, your hobbies, your interests, or your favorite things. For example, if you are a fan of sports, you can choose a ringtone that plays the theme song of your favorite team or player. If you are a fan of movies, you can choose a ringtone that plays a famous dialogue or a catchy tune from your favorite movie. If you are a fan of music, you can choose a ringtone that plays a song or a melody from your favorite artist or genre.</p>
- <h3>Avoid ringtones that are irritating or inappropriate for your surroundings</h3>
- <p>Another tip for choosing the best ringtone for your phone is to avoid ringtones that are irritating or inappropriate for your surroundings. You don't want to annoy or offend other people with your ringtone, especially in public places or professional settings. You should avoid ringtones that are too loud, too long, too vulgar, too violent, or too controversial. You should also avoid ringtones that are similar to emergency sounds, such as sirens, alarms, or horns. You should also consider the context and the occasion when choosing your ringtone. For example, if you are in a meeting, you should choose a ringtone that is subtle and discreet. If you are in a party, you should choose a ringtone that is fun and upbeat.</p>
- <h3>Pick a song or music that you like and enjoy</h3>
- <p>A third tip for choosing the best ringtone for your phone is to pick a song or music that you like and enjoy. Your ringtone should be something that makes you happy and relaxed when you hear it. You should choose a song or music that you know well and can sing along to. You should also choose a song or music that has a catchy and memorable hook or chorus that can easily be recognized by others. You should also choose a song or music that has a good quality and clarity of sound.</p>
- <h2>How to use ringtone download 3 to get your favorite ringtones</h2>
- <h3>Visit the website or app of ringtone download 3 and browse through the categories</h3>
- <p>The first step to use ringtone download 3 to get your favorite ringtones is to visit the website or app of ringtone download 3 and browse through the categories. You can access the website by typing www.ringtone-download-3.com on your browser or by scanning the QR code on the homepage. You can also download the app from Google Play Store or Apple App Store by searching for "ringtone download 3". Once you open the website or app, you will see various categories of ringtones, such as Hindi, Tamil, Devotional, Music, Name, iPhone, etc. You can click on any category to see the list of ringtones available under it.</p>
- <p>ringtone download 3 moonu bgm<br />
- ringtone download 3 tamil movie bgm<br />
- ringtone download 3 bgm instrumental<br />
- ringtone download 3 kannazhaga moonu bgm<br />
- ringtone download 3 idhazhin oram bgm<br />
- ringtone download 3 mp3 ringtones<br />
- ringtone download 3 ar flute bgm<br />
- ringtone download 3 bade achhe lagte hai<br />
- ringtone download 3 overture tron legacy<br />
- ringtone download 3 thadam inaye bgm<br />
- ringtone download 3 doctor movie bgm<br />
- ringtone download 3 valmiki bgm<br />
- ringtone download 3 vardaan bgm<br />
- ringtone download 3 lets get it on bgm<br />
- ringtone download 3 in the meantime bgm<br />
- ringtone download 3 awesome bgm<br />
- ringtone download 3 hindi ringtones<br />
- ringtone download 3 name ringtones<br />
- ringtone download 3 iphone ringtones<br />
- ringtone download 3 music ringtones<br />
- ringtone download 3 devotional ringtones<br />
- ringtone download 3 baby ringtones<br />
- ringtone download 3 tamil ringtones<br />
- ringtone download 3 nainowale by gulzar hussain<br />
- ringtone download 3 hi by gulzar hussain<br />
- ringtone download 3 hello by gulzar hussain<br />
- ringtone download 3 swarnika by swarnika<br />
- ringtone download 3 jay shree ram sms by chirag prajapat<br />
- ringtone download 3 airtel old ringtone by chirag prajapat<br />
- ringtone download 3 jaydip by jaydip<br />
- ringtone download 3 gurjar by ramhet gurjar<br />
- ringtone download 3 sad song by mustafa <br />
- ringtone download 3 ved by amol <br />
- ringtone download 3 iphone by rehman <br />
- ringtone download 3 prokerala ringtones <br />
- ringtone download 3 zedge ringtones <br />
- ringtone download 3 free mp3 ringtones <br />
- ringtone download 3 mobile ringtones <br />
- ringtone download 3 cool ringtones <br />
- ringtone download 3 trendy ringtones <br />
- ringtone download 3 upload ringtones <br />
- ringtone download 3 share ringtones <br />
- ringtone download 3 buzzer ringtones <br />
- ringtone download 3 personality ringtones <br />
- ringtone download 3 new ringtones <br />
- ringtone download 3 popular ringtones <br />
- ringtone download 3 message tones <br />
- ringtone download 3 alert tones <br />
- ringtone download 3 love calculator ringtones</p>
- <h3>Listen to the preview of the ringtones and select the ones you want</h3>
- <p>The second step to use ringtone download 3 to get your favorite ringtones is to listen to the preview of the ringtones and select the ones you want. You can listen to the preview of any ringtone by clicking on the play button next to it. You can also see the name, duration, size, and rating of each ringtone. You can select as many ringtones as you want by clicking on the checkbox next to them.</p>
- <h3>Click on the download button and save the ringtones to your phone</h3>
- <p>The third step to use ringtone download 3 to get your favorite ringtones is to click on the download button and save the ringtones to your phone. Once you have selected all the ringtones you want, you can click on the download button at the bottom of the page. You will see a pop-up window that asks you to choose the location where you want to save the ringtones. You can choose any folder or directory on your phone or SD card. You can also rename the ringtones if you want. After you have chosen the location, click on the save button and wait for the download to complete. You will see a confirmation message that says "Download successful". You can then go to your phone's settings and set the ringtones as your default or custom ringtones.</p>
- <h2>Conclusion</h2>
- <p>Ringtone download 3 is a great way to get the best ringtones for your phone. It offers a wide range of ringtones in different categories and genres, an easy and fast download process with no sign up or registration required, and high-quality sound and compatibility with all types of phones. You can also use some tips to choose the best ringtone for your phone, such as setting a ringtone that reflects your personality and style, avoiding ringtones that are irritating or inappropriate for your surroundings, and picking a song or music that you like and enjoy. You can also use ringtone download 3 to get your favorite ringtones by visiting the website or app, listening to the preview of the ringtones, and clicking on the download button. So, what are you waiting for? Visit ringtone download 3 today and get ready to rock your phone with some awesome ringtones.</p>
- <h2>FAQs</h2>
- <h4>Q: Is ringtone download 3 free?</h4>
- <p>A: Yes, ringtone download 3 is completely free and does not charge any fees or subscriptions for downloading ringtones.</p>
- <h4>Q: How many ringtones can I download from ringtone download 3?</h4>
- <p>A: You can download as many ringtones as you want from ringtone download 3. There is no limit or restriction on the number of downloads.</p>
- <h4>Q: Can I upload my own ringtones to ringtone download 3?</h4>
- <p>A: Yes, you can upload your own ringtones to ringtone download 3 and share them with other users. You just need to click on the upload button on the homepage and follow the instructions.</p>
- <h4>Q: Can I rate and review the ringtones on ringtone download 3?</h4>
- <p>A: Yes, you can rate and review the ringtones on ringtone download 3 by clicking on the star icon and the comment icon next to each ringtone. You can also see the ratings and reviews of other users.</p>
- <h4>Q: Can I request a specific ringtone on ringtone download 3?</h4>
- <p>A: Yes, you can request a specific ringtone on ringtone download 3 by clicking on the request button on the homepage and filling out the form. You can also see the requests of other users and vote for them.</p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Enjoy Pixel Demolish Mod APK with Unlimited Money and Gear - No Root Required.md DELETED
@@ -1,101 +0,0 @@
- <br />
- <h1>Pixel Demolish Mod APK Unlimited Money: A Fun and Addictive Game for Android Users</h1>
- <p>If you are looking for a simple yet challenging game that will keep you entertained for hours, then you should try Pixel Demolish Mod APK Unlimited Money. This is a modified version of the original Pixel Demolish game that gives you unlimited money to upgrade your towers and win. In this article, we will tell you what Pixel Demolish Mod APK is, why you should download it, and how to install it on your Android device.</p>
- <h2>What is Pixel Demolish Mod APK?</h2>
- <p>Pixel Demolish is a casual game developed by Dalak Games that involves placing towers and tapping on the falling blocks to demolish them. The game has pixelated graphics and retro sound effects that give it a nostalgic feel. The game is easy to play but hard to master, as you have to balance your tower placement, timing, and strategy to grind all the falling pixels and collect gold coins.</p>
- <h2>pixel demolish mod apk unlimited money</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNU9E">https://jinyurl.com/2uNU9E</a></b></p><br /><br />
- <h3>The gameplay of Pixel Demolish</h3>
- <p>The gameplay of Pixel Demolish is simple and fun. You have to place towers on the ground and tap on the falling blocks to destroy them. The blocks come in different shapes, sizes, colors, and speeds, and you have to match the tower color with the block color to demolish it. If you miss a block or hit a wrong color, you will lose a life. You have three lives in each level, and if you lose them all, you will have to start over.</p>
- <p>The game has 100 levels with increasing difficulty and variety. You will encounter different types of blocks, such as bombs, spikes, shields, magnets, and more, that will challenge your skills and reflexes. You will also unlock new towers with different abilities, such as lasers, rockets, cannons, and more, that will help you clear the levels faster and easier.</p>
- <h3>The features of Pixel Demolish Mod APK</h3>
- <p>Pixel Demolish Mod APK is a modified version of the original Pixel Demolish game that gives you unlimited money to upgrade your towers and win. With this mod apk, you can enjoy the following features:</p>
- <ul>
- <li><b>Unlimited money:</b> You can get unlimited gold coins by destroying the blocks and use them to buy new towers and upgrade them. You can also use the money to buy power-ups, such as extra lives, bombs, magnets, and more, that will help you in the game.</li>
- <li><b>All towers unlocked:</b> You can access all the towers in the game without having to complete the levels or spend money. You can choose from 12 different towers with unique abilities and effects.</li>
- <li><b>No ads:</b> You can play the game without any interruptions or distractions from annoying ads. You can enjoy the game without any lag or glitches.</li>
- </ul>
- <h2>Why should you download Pixel Demolish Mod APK Unlimited Money?</h2>
- <p>If you are a fan of pixel art games and tower defense games, then you should download Pixel Demolish Mod APK Unlimited Money. This mod apk will give you a lot of advantages over the original version of the game. Here are some reasons why you should download this mod apk:</p>
- <h3>The benefits of having unlimited money in Pixel Demolish</h3>
- <p>Having unlimited money in Pixel Demolish will make the game more fun and easy for you. You can buy any tower you want and upgrade it to its maximum level. You can also buy power-ups that will help you clear the levels faster and easier. You can experiment with different tower combinations and strategies and have more fun with the game. You can also save your money for other things, such as buying apps, games, or subscriptions.</p>
- <h3>The drawbacks of the original version of Pixel Demolish</h3>
- <p>The original version of Pixel Demolish has some drawbacks that can make the game frustrating and boring for some players. Here are some of the drawbacks of the original version:</p>
- <p>pixel demolish mod apk download free<br />
- pixel demolish hack apk unlimited coins<br />
- pixel demolish mod apk latest version<br />
- pixel demolish cheat apk unlimited gems<br />
- pixel demolish mod apk android 1<br />
- pixel demolish cracked apk unlimited ammo<br />
- pixel demolish mod apk revdl<br />
- pixel demolish modded apk unlimited health<br />
- pixel demolish mod apk offline<br />
- pixel demolish premium apk unlimited weapons<br />
- pixel demolish mod apk no root<br />
- pixel demolish hack apk download for android<br />
- pixel demolish mod apk obb<br />
- pixel demolish pro apk unlimited money and gear<br />
- pixel demolish mod apk rexdl<br />
- pixel demolish hack apk online<br />
- pixel demolish mod apk data<br />
- pixel demolish full apk unlimited everything<br />
- pixel demolish mod apk happymod<br />
- pixel demolish hack apk no verification<br />
- pixel demolish mod apk 2.6.7<br />
- pixel demolish unlock all apk unlimited money and gear<br />
- pixel demolish mod apk apkpure<br />
- pixel demolish hack apk ios<br />
- pixel demolish mod apk 2023<br />
- pixel demolish free shopping apk unlimited money and gear<br />
- pixel demolish mod apk android republic<br />
- pixel demolish hack apk pc<br />
- pixel demolish mod apk 2.6.8<br />
- pixel demolish unlimited resources apk unlimited money and gear<br />
- pixel demolish mod apk an1<br />
- pixel demolish hack apk 2023<br />
- pixel demolish mod apk 2.6.9<br />
- pixel demolish mega mod apk unlimited money and gear<br />
- pixel demolish mod apk platinmods<br />
- pixel demolish hack tool apk unlimited money and gear<br />
- pixel demolish mod apk 2.7.0<br />
- pixel demolish god mode apk unlimited money and gear<br />
- pixel demolish mod apk blackmod<br />
- pixel demolish hack generator apk unlimited money and gear</p>
- <ul>
- <li><b>Limited money:</b> You can only get a limited amount of gold coins by destroying the blocks, and you have to spend them wisely to buy and upgrade your towers. You may not have enough money to buy the tower you want or to upgrade it to its full potential. You may also run out of money to buy power-ups that can help you in the game.</li>
- <li><b>Locked towers:</b> You can only unlock new towers by completing the levels or by spending money. You may not be able to access some of the towers that you like or that suit your playstyle. You may also miss out on some of the cool abilities and effects that the towers have.</li>
- <li><b>Ads:</b> You have to watch ads to get extra lives, coins, or power-ups in the game. The ads can be annoying and distracting, and they can also cause lag or glitches in the game. You may also accidentally click on the ads and be redirected to other websites or apps.</li>
- </ul>
- <h2>How to download and install Pixel Demolish Mod APK Unlimited Money on your Android device?</h2>
- <p>If you want to download and install Pixel Demolish Mod APK Unlimited Money on your Android device, you have to follow some simple steps. Here are the steps to download and install Pixel Demolish Mod APK:</p>
- <h3>The steps to download and install Pixel Demolish Mod APK</h3>
- <ol>
- <li><b>Download the mod apk file:</b> You can download the mod apk file from a reliable source, such as [this link]. The file size is about 30 MB, so make sure you have enough space on your device.</li>
- <li><b>Enable unknown sources:</b> You have to enable unknown sources on your device settings to allow the installation of apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
- <li><b>Install the mod apk file:</b> You have to locate the downloaded mod apk file on your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</li>
- <li><b>Launch the game:</b> You can now launch the game from your app drawer or home screen and enjoy playing Pixel Demolish Mod APK Unlimited Money.</li>
- </ol>
- <h3>The precautions to take before installing Pixel Demolish Mod APK</h3>
- <p>Before installing Pixel Demolish Mod APK Unlimited Money on your device, you should take some precautions to avoid any problems or risks. Here are some of the precautions you should take:</p>
- <ul>
- <li><b>Backup your data:</b> You should backup your data, such as contacts, photos, videos, messages, etc., before installing any mod apk on your device. This will help you restore your data in case something goes wrong or you lose your data.</li>
- <li><b>Scan the mod apk file:</b> You should scan the mod apk file with a trusted antivirus or malware scanner before installing it on your device. This will help you detect any viruses or malware that may harm your device or steal your information.</li>
- <li><b>Uninstall the original version of Pixel Demolish:</b> You should uninstall the original version of Pixel Demolish from your device before installing the mod apk version. This will prevent any conflicts or errors between the two versions of the game.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Pixel Demolish Mod APK Unlimited Money is a fun and addictive game that will keep you entertained for hours. You can enjoy destroying pixelated blocks with different towers and power-ups, and you can also get unlimited money to buy and upgrade anything you want in the game. You can download and install Pixel Demolish Mod APK Unlimited Money on your Android device by following some simple steps and taking some precautions. If you are looking for a simple yet challenging game that will give you a nostalgic feel, then you should try Pixel Demolish Mod APK Unlimited Money.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Pixel Demolish Mod APK Unlimited Money:</p>
- <ol>
- <li><b>Is Pixel Demolish Mod APK Unlimited Money safe to use?</b></li>
- <p>Yes, Pixel Demolish Mod APK Unlimited Money is safe to use if you download it from a reliable source and scan it with a trusted antivirus or malware scanner. You should also take some precautions before installing it on your device, such as backing up your data, uninstalling the original version of the game, and enabling unknown sources.</p>
- <li><b>What are the requirements to play Pixel Demolish Mod APK Unlimited Money?</b></li>
- <p>To play Pixel Demolish Mod APK Unlimited Money, you need an Android device with Android 4.4 or higher and at least 30 MB of free space. You also need an internet connection to download and install the mod apk file.</p>
- <li><b>Can I play Pixel Demolish Mod APK Unlimited Money offline?</b></li>
- <p>Yes, you can play Pixel Demolish Mod APK Unlimited Money offline once you have installed it on your device. You do not need an internet connection to play the game, unless you want to update it or access some online features.</p>
- <li><b>Can I play Pixel Demolish Mod APK Unlimited Money with my friends?</b></li>
- <p>No, Pixel Demolish Mod APK Unlimited Money does not have a multiplayer mode or a social feature. You can only play the game solo and compete with yourself or with the global leaderboard.</p>
- <li><b>How can I contact the developer of Pixel Demolish Mod APK Unlimited Money?</b></li>
- <p>If you have any questions, feedback, or suggestions about Pixel Demolish Mod APK Unlimited Money, you can contact the developer of the game by emailing them at [email protected]. You can also follow them on Facebook and Twitter for more updates and news about the game.</p>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/AI-Hobbyist/Hoyo-RVC/infer/trans_weights.py DELETED
@@ -1,16 +0,0 @@
- import torch, pdb
- 
- # a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf#
- # a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf#
- # a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf#
- # a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf#
- a = torch.load(
-     r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth"
- )[
-     "model"
- ] # sim_nsf#
- for key in a.keys():
-     a[key] = a[key].half()
- # torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")#
- # torch.save(a,"ft-mi-sim1k.pt")#
- torch.save(a, "ft-mi-no_opt-no_dropout.pt") #
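
The script above hard-codes one checkpoint path per run and keeps the other variants as commented-out lines. A sketch of the same fp16 conversion written as a reusable function; the example paths below are placeholders, not the paths used in the script:

```python
import torch

def convert_checkpoint_to_fp16(src_path: str, dst_path: str) -> None:
    """Load a training checkpoint, cast every weight tensor to half
    precision, and save the result as a smaller inference-only file."""
    weights = torch.load(src_path, map_location="cpu")["model"]
    weights = {key: value.half() for key, value in weights.items()}
    torch.save(weights, dst_path)

# Hypothetical usage, for illustration only:
# convert_checkpoint_to_fp16("logs/ft-mi/G_1000.pth", "ft-mi.pt")
```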
 
spaces/AI4PD/hexviz/tests/test_models.py DELETED
@@ -1,15 +0,0 @@
- from transformers import GPT2LMHeadModel, GPT2TokenizerFast
- 
- from hexviz.models import get_zymctrl
- 
- 
- def test_get_zymctrl():
-     result = get_zymctrl()
- 
-     assert result is not None
-     assert isinstance(result, tuple)
- 
-     tokenizer, model = result
- 
-     assert isinstance(tokenizer, GPT2TokenizerFast)
-     assert isinstance(model, GPT2LMHeadModel)
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/hifigan/__init__.py DELETED
@@ -1,7 +0,0 @@
- from .models import Generator
- 
- 
- class AttrDict(dict):
-     def __init__(self, *args, **kwargs):
-         super(AttrDict, self).__init__(*args, **kwargs)
-         self.__dict__ = self
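
The trick in AttrDict is the last line: aliasing the instance's `__dict__` to the dict itself makes every key double as an attribute. A small usage sketch; the config keys below are illustrative, not taken from the original file:

```python
cfg = AttrDict({"resblock": "1", "upsample_rates": [8, 8, 2, 2]})
assert cfg.resblock == cfg["resblock"]  # attribute and key access are the same
cfg.num_mels = 80                       # new attributes show up as keys too
assert cfg["num_mels"] == 80
```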
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/aishell3_no_tone/preprocess.py DELETED
@@ -1,31 +0,0 @@
- import glob
- from data_gen.tts.base_preprocess import BasePreprocessor
- 
- 
- class AiShell3Preprocess(BasePreprocessor):
-     def meta_data(self):
-         wavfn2text = {}
- 
-         def get_wavfn2text(dir_name):
-             d = open(f'{self.raw_data_dir}/{dir_name}/content.txt').readlines()
-             d = [l.strip().split("\t") for l in d if l.strip() != '']
-             d = {l[0]: "".join(l[1].split(" ")[::2]) for l in d}
-             wavfn2text.update(d)
- 
-         get_wavfn2text('train')
-         get_wavfn2text('test')
- 
-         all_wavs = sorted(
-             glob.glob(f'{self.raw_data_dir}/train/wav/*/*.wav') +
-             glob.glob(f'{self.raw_data_dir}/test/wav/*/*.wav'))
-         for wav_fn in all_wavs:
-             wav_basename = wav_fn.split("/")[-1]
-             spk_name = wav_fn.split("/")[-2]
-             item_name = f'{spk_name}_{wav_basename}'
-             # yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l}
-             # yield item_name, wav_fn, wavfn2text[wav_basename], spk_name
-             yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': wavfn2text[wav_basename], 'spk_name': spk_name}
- 
- 
- if __name__ == "__main__":
-     AiShell3Preprocess().process()
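
The `[::2]` slice in `get_wavfn2text` is the key detail: AISHELL-3's content.txt interleaves each Chinese character with its pinyin, so keeping every other token recovers the plain transcript. The sample line below is invented to match what this parser expects:

```python
line = "SSB00050001.wav\t广 guang3 州 zhou1 女 nv3 大 da4 学 xue2"
wav_name, transcript = line.strip().split("\t")
text = "".join(transcript.split(" ")[::2])  # keep characters, drop pinyin
print(wav_name, text)  # SSB00050001.wav 广州女大学
```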
 
spaces/AILab-CVC/SEED-LLaMA/start.py DELETED
@@ -1,11 +0,0 @@
- import subprocess
- 
- if __name__ == '__main__':
-     backend_command = ['python3', 'gradio_demo/seed_llama_flask.py', '--image_transform', 'configs/transform/clip_transform.yaml', '--tokenizer', 'configs/tokenizer/seed_llama_tokenizer_hf.yaml', '--model', 'configs/llm/seed_llama_14b_8bit.yaml', '--port', '7890', '--llm_device', 'cuda:0', '--tokenizer_device', 'cuda:0', '--offload_encoder', '--offload_decoder']
- 
-     frontend_command = ['python3', 'gradio_demo/seed_llama_gradio.py', '--server_port', '7860', '--request_address', 'http://127.0.0.1:7890/generate', '--model_type', 'seed-llama-14b']
- 
-     backend_proc = subprocess.Popen(backend_command)
- 
-     frontend_proc = subprocess.Popen(frontend_command)
- 
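
Note that the launcher above returns as soon as both children are spawned and never waits on them. If the parent process should stay alive for the lifetime of the demo, one variant (sketch only, with the command lists abbreviated) is to block on the frontend and then shut the backend down:

```python
import subprocess

backend_proc = subprocess.Popen(["python3", "gradio_demo/seed_llama_flask.py"])
frontend_proc = subprocess.Popen(["python3", "gradio_demo/seed_llama_gradio.py"])
try:
    frontend_proc.wait()      # block until the Gradio frontend exits
finally:
    backend_proc.terminate()  # then stop the Flask backend as well
    backend_proc.wait()
```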
 
spaces/AP123/dreamgaussian/index.html DELETED
@@ -1,25 +0,0 @@
- <!DOCTYPE html>
- <html>
-   <head>
-     <meta charset="utf-8" />
-     <meta name="viewport" content="width=device-width" />
-     <title>DreamGaussian Project</title>
-     <link rel="stylesheet" href="style.css" />
-   </head>
-   <body>
-     <div class="card">
-       <h1>DreamGaussian</h1>
-       <p>This repository contains the official implementation for <a href="https://arxiv.org/abs/XXXX.XXXX">DreamGaussian: Generative Gaussian Splatting for Efficient 3D Content Creation</a>.</p>
-       <p><a href="https://dreamgaussian.github.io">Project Page</a> | <a href="https://arxiv.org/abs/XXXX.XXXX">Arxiv</a></p>
-       <h2>Install</h2>
-       <pre><code>
- pip install -r requirements.txt
- git clone --recursive https://github.com/ashawkey/diff-gaussian-rasterization
- pip install ./diff-gaussian-rasterization
- pip install ./simple-knn
- pip install git+https://github.com/NVlabs/nvdiffrast/
- pip install git+https://github.com/ashawkey/kiuikit
-       </code></pre>
-     </div>
-   </body>
- </html>
 
spaces/ASJMO/freegpt/client/css/global.css DELETED
@@ -1,70 +0,0 @@
- @import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap");
- * {
-     --font-1: "Inter", sans-serif;
-     --section-gap: 24px;
-     --border-radius-1: 8px;
-     margin: 0;
-     padding: 0;
-     box-sizing: border-box;
-     position: relative;
-     font-family: var(--font-1);
- }
- 
- .theme-light {
-     --colour-1: #f5f5f5;
-     --colour-2: #000000;
-     --colour-3: #474747;
-     --colour-4: #949494;
-     --colour-5: #ebebeb;
-     --colour-6: #dadada;
- 
-     --accent: #3a3a3a;
-     --blur-bg: #ffffff;
-     --blur-border: #dbdbdb;
-     --user-input: #282828;
-     --conversations: #666666;
- }
- 
- .theme-dark {
-     --colour-1: #181818;
-     --colour-2: #ccc;
-     --colour-3: #dadada;
-     --colour-4: #f0f0f0;
-     --colour-5: #181818;
-     --colour-6: #242424;
- 
-     --accent: #151718;
-     --blur-bg: #242627;
-     --blur-border: #242627;
-     --user-input: #f5f5f5;
-     --conversations: #555555;
- }
- 
- html,
- body {
-     background: var(--colour-1);
-     color: var(--colour-3);
- }
- 
- ol,
- ul {
-     padding-left: 20px;
- }
- 
- .shown {
-     display: flex !important;
- }
- 
- a:-webkit-any-link {
-     color: var(--accent);
- }
- 
- pre {
-     white-space: pre-wrap;
- }
- 
- @media screen and (max-height: 720px) {
-     :root {
-         --section-gap: 16px;
-     }
- }
 
spaces/ASJMO/freegpt/client/css/select.css DELETED
@@ -1,35 +0,0 @@
- select {
-     -webkit-border-radius: 8px;
-     -moz-border-radius: 8px;
-     border-radius: 8px;
- 
-     -webkit-backdrop-filter: blur(20px);
-     backdrop-filter: blur(20px);
- 
-     cursor: pointer;
-     background-color: var(--blur-bg);
-     border: 1px solid var(--blur-border);
-     color: var(--colour-3);
-     display: block;
-     position: relative;
-     overflow: hidden;
-     outline: none;
-     padding: 8px 16px;
- 
-     appearance: none;
- }
- 
- /* scrollbar */
- select.dropdown::-webkit-scrollbar {
-     width: 4px;
-     padding: 8px 0px;
- }
- 
- select.dropdown::-webkit-scrollbar-track {
-     background-color: #ffffff00;
- }
- 
- select.dropdown::-webkit-scrollbar-thumb {
-     background-color: #555555;
-     border-radius: 10px;
- }
 
spaces/Abhaykoul/HelpingAI-T3/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: HelpingAI
- emoji: 😻
- colorFrom: gray
- colorTo: yellow
- sdk: static
- pinned: false
- license: mit
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AchyuthGamer/OpenGPT/g4f/README.md DELETED
@@ -1,5 +0,0 @@
- ## 🚀 API G4F
- 
- This API is built upon the [gpt4free](https://github.com/xtekky/gpt4free) project.
- 
- 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvasinput/Factory.d.ts DELETED
@@ -1,7 +0,0 @@
- import CanvasInput from './CanvasInput';
- 
- export default function (
-     x?: number, y?: number,
-     fixedWidth?: number, fixedHeight?: number,
-     config?: CanvasInput.IConfig
- ): CanvasInput;
 
spaces/Agusbs98/automatic-ecg-diagnosis/data.py DELETED
@@ -1,45 +0,0 @@
- 
- import os, sys
- from libs import *
- 
- class ECGDataset(torch.utils.data.Dataset):
-     def __init__(self,
-         df_path, data_path,
-         config,
-         augment = False,
-     ):
-         self.df_path, self.data_path, = df_path, data_path,
-         self.df = pandas.read_csv(self.df_path)
- 
-         self.config = config
-         self.augment = augment
- 
-     def __len__(self,
-     ):
-         return len(self.df)
- 
-     def __getitem__(self,
-         index,
-     ):
-         row = self.df.iloc[index]
- 
-         # save np.load
-         np_load_old = np.load
- 
-         # modify the default parameters of np.load
-         np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
- 
-         # call load_data with allow_pickle implicitly set to true
-         ecg = np.load("{}/{}.npy".format(self.data_path, row["id"]))[self.config["ecg_leads"], :]
- 
-         # restore np.load for future normal usage
-         np.load = np_load_old
- 
-         ecg = pad_sequences(ecg, self.config["ecg_length"], "float64",
-             "post", "post",
-         )
-         if self.augment:
-             ecg = self.drop_lead(ecg)
-         ecg = torch.tensor(ecg).float()
- 
-         return ecg
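
The save/patch/restore dance around `np.load` above works around NumPy (>= 1.16) defaulting to `allow_pickle=False`. The same effect is normally achieved by passing the flag directly, with no global side effects; a sketch with placeholder values standing in for the ones built in `__getitem__`:

```python
import numpy as np

npy_path = "data/0001.npy"  # hypothetical example file
ecg_leads = [0, 1]          # hypothetical lead indices
# Opt into pickled object arrays per call instead of patching np.load:
ecg = np.load(npy_path, allow_pickle=True)[ecg_leads, :]
```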
 
spaces/AkitoP/umamusume_bert_vits2/preprocess_text.py DELETED
@@ -1,107 +0,0 @@
- import json
- from collections import defaultdict
- from random import shuffle
- from typing import Optional
- 
- from tqdm import tqdm
- import click
- from text.cleaner import clean_text
- 
- 
- @click.command()
- @click.option(
-     "--transcription-path",
-     default="filelists/genshin.list",
-     type=click.Path(exists=True, file_okay=True, dir_okay=False),
- )
- @click.option("--cleaned-path", default=None)
- @click.option("--train-path", default="filelists/train.list")
- @click.option("--val-path", default="filelists/val.list")
- @click.option(
-     "--config-path",
-     default="configs/config.json",
-     type=click.Path(exists=True, file_okay=True, dir_okay=False),
- )
- @click.option("--val-per-spk", default=4)
- @click.option("--max-val-total", default=8)
- @click.option("--clean/--no-clean", default=True)
- def main(
-     transcription_path: str,
-     cleaned_path: Optional[str],
-     train_path: str,
-     val_path: str,
-     config_path: str,
-     val_per_spk: int,
-     max_val_total: int,
-     clean: bool,
- ):
-     if cleaned_path is None:
-         cleaned_path = transcription_path + ".cleaned"
- 
-     if clean:
-         errors = 0
-         out_file = open(cleaned_path, "w", encoding="utf-8")
-         for line in tqdm(open(transcription_path, encoding="utf-8").readlines()):
-             try:
-                 utt, spk, language, text = line.strip().split("|")
-                 norm_text, phones, tones, word2ph = clean_text(text, language)
-                 out_file.write(
-                     "{}|{}|{}|{}|{}|{}|{}\n".format(
-                         utt,
-                         spk,
-                         language,
-                         norm_text,
-                         " ".join(phones),
-                         " ".join([str(i) for i in tones]),
-                         " ".join([str(i) for i in word2ph]),
-                     )
-                 )
-             except Exception as error:
-                 errors += 1
-                 print("err!", line, error)
-         print("errors:", errors)
-         out_file.close()
- 
-     transcription_path = cleaned_path
- 
-     spk_utt_map = defaultdict(list)
-     spk_id_map = {}
-     current_sid = 0
- 
-     with open(transcription_path, encoding="utf-8") as f:
-         for line in f.readlines():
-             utt, spk, language, text, phones, tones, word2ph = line.strip().split("|")
-             spk_utt_map[spk].append(line)
- 
-             if spk not in spk_id_map.keys():
-                 spk_id_map[spk] = current_sid
-                 current_sid += 1
- 
-     train_list = []
-     val_list = []
- 
-     for spk, utts in spk_utt_map.items():
-         shuffle(utts)
-         val_list += utts[:val_per_spk]
-         train_list += utts[val_per_spk:]
- 
-     if len(val_list) > max_val_total:
-         train_list += val_list[max_val_total:]
-         val_list = val_list[:max_val_total]
- 
-     with open(train_path, "w", encoding="utf-8") as f:
-         for line in train_list:
-             f.write(line)
- 
-     with open(val_path, "w", encoding="utf-8") as f:
-         for line in val_list:
-             f.write(line)
- 
-     config = json.load(open(config_path, encoding="utf-8"))
-     config["data"]["spk2id"] = spk_id_map
-     with open(config_path, "w", encoding="utf-8") as f:
-         json.dump(config, f, indent=2, ensure_ascii=False)
- 
- 
- if __name__ == "__main__":
-     main()
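
Each cleaned line written by the loop above carries seven pipe-separated fields: `utt`, `spk`, `language`, `norm_text`, `phones`, `tones`, and `word2ph`. A minimal sketch of reading one back; the sample values are invented:

```python
sample = "wavs/0001.wav|spk1|ZH|你好|n i h ao|1 2 3 4|2 2"
utt, spk, language, norm_text, phones, tones, word2ph = sample.split("|")
phones = phones.split(" ")                      # list of phoneme symbols
tones = [int(t) for t in tones.split(" ")]      # one tone id per phoneme
word2ph = [int(n) for n in word2ph.split(" ")]  # phones per input character
```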
 
spaces/AliHaider0343/Restaurant-Domain-Sentence-Categories-Classification/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Restaurant Domain Sentence Categories Classification
- emoji: 🌖
- colorFrom: indigo
- colorTo: pink
- sdk: streamlit
- sdk_version: 1.21.0
- app_file: app.py
- pinned: false
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Aloento/9Nine-VITS/hparams.py DELETED
@@ -1,42 +0,0 @@
- import json
- 
- 
- def get_hparams_from_file(config_path):
-     with open(config_path, "r") as f:
-         data = f.read()
-     config = json.loads(data)
- 
-     hparams = HParams(**config)
-     return hparams
- 
- 
- class HParams:
-     def __init__(self, **kwargs):
-         for k, v in kwargs.items():
-             if type(v) == dict:
-                 v = HParams(**v)
-             self[k] = v
- 
-     def keys(self):
-         return self.__dict__.keys()
- 
-     def items(self):
-         return self.__dict__.items()
- 
-     def values(self):
-         return self.__dict__.values()
- 
-     def __len__(self):
-         return len(self.__dict__)
- 
-     def __getitem__(self, key):
-         return getattr(self, key)
- 
-     def __setitem__(self, key, value):
-         return setattr(self, key, value)
- 
-     def __contains__(self, key):
-         return key in self.__dict__
- 
-     def __repr__(self):
-         return self.__dict__.__repr__()
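
Because `__init__` re-wraps nested dicts, a parsed JSON config becomes dot-accessible all the way down. A small usage sketch; the config keys are illustrative, not from the original file:

```python
hps = HParams(train={"batch_size": 16}, data={"sampling_rate": 22050})
print(hps.train.batch_size)      # 16 -- nested dicts became HParams too
print("data" in hps)             # True, via __contains__
hps["train"]["batch_size"] = 32  # item access works interchangeably
```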
 
spaces/Alycer/VITS-Umamusume-voice-synthesizer/losses.py DELETED
@@ -1,61 +0,0 @@
- import torch
- from torch.nn import functional as F
- 
- import commons
- 
- 
- def feature_loss(fmap_r, fmap_g):
-     loss = 0
-     for dr, dg in zip(fmap_r, fmap_g):
-         for rl, gl in zip(dr, dg):
-             rl = rl.float().detach()
-             gl = gl.float()
-             loss += torch.mean(torch.abs(rl - gl))
- 
-     return loss * 2
- 
- 
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-     loss = 0
-     r_losses = []
-     g_losses = []
-     for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-         dr = dr.float()
-         dg = dg.float()
-         r_loss = torch.mean((1-dr)**2)
-         g_loss = torch.mean(dg**2)
-         loss += (r_loss + g_loss)
-         r_losses.append(r_loss.item())
-         g_losses.append(g_loss.item())
- 
-     return loss, r_losses, g_losses
- 
- 
- def generator_loss(disc_outputs):
-     loss = 0
-     gen_losses = []
-     for dg in disc_outputs:
-         dg = dg.float()
-         l = torch.mean((1-dg)**2)
-         gen_losses.append(l)
-         loss += l
- 
-     return loss, gen_losses
- 
- 
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
-     """
-     z_p, logs_q: [b, h, t_t]
-     m_p, logs_p: [b, h, t_t]
-     """
-     z_p = z_p.float()
-     logs_q = logs_q.float()
-     m_p = m_p.float()
-     logs_p = logs_p.float()
-     z_mask = z_mask.float()
- 
-     kl = logs_p - logs_q - 0.5
-     kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
-     kl = torch.sum(kl * z_mask)
-     l = kl / torch.sum(z_mask)
-     return l
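
The per-element expression in `kl_loss` is worth spelling out. Writing sigma = exp(logs), each masked element contributes the closed-form KL term between two diagonal Gaussians, with the sampled latent z_p standing in for the analytic second moment, so the loss is a one-sample estimate of KL(q || p). This reading is inferred from the code above, not stated in the original file:

```latex
\mathrm{kl}(z) = \log\sigma_p - \log\sigma_q - \tfrac{1}{2}
               + \frac{(z-\mu_p)^2}{2\sigma_p^2},
\qquad
\mathbb{E}_{z\sim\mathcal{N}(\mu_q,\sigma_q^2)}\!\left[\mathrm{kl}(z)\right]
  = \mathrm{KL}\!\left(\mathcal{N}(\mu_q,\sigma_q^2)\,\middle\|\,\mathcal{N}(\mu_p,\sigma_p^2)\right)
```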
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/training/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
- 
- # empty
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py DELETED
@@ -1,473 +0,0 @@
- # coding=utf-8
- # Copyright 2023 HuggingFace Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- 
- import copy
- import random
- import unittest
- 
- import numpy as np
- import torch
- from PIL import Image
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
- 
- from diffusers import (
-     AutoencoderKL,
-     DDIMScheduler,
-     DPMSolverMultistepScheduler,
-     EulerDiscreteScheduler,
-     HeunDiscreteScheduler,
-     StableDiffusionXLInpaintPipeline,
-     UNet2DConditionModel,
-     UniPCMultistepScheduler,
- )
- from diffusers.utils import floats_tensor, torch_device
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
- 
- from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
- from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
- 
- 
- enable_full_determinism()
- 
- 
- class StableDiffusionXLInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
-     pipeline_class = StableDiffusionXLInpaintPipeline
-     params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
-     batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
-     image_params = frozenset([])
-     # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
-     image_latents_params = frozenset([])
- 
-     def get_dummy_components(self, skip_first_text_encoder=False):
-         torch.manual_seed(0)
-         unet = UNet2DConditionModel(
-             block_out_channels=(32, 64),
-             layers_per_block=2,
-             sample_size=32,
-             in_channels=4,
-             out_channels=4,
-             down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
-             up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-             # SD2-specific config below
-             attention_head_dim=(2, 4),
-             use_linear_projection=True,
-             addition_embed_type="text_time",
-             addition_time_embed_dim=8,
-             transformer_layers_per_block=(1, 2),
-             projection_class_embeddings_input_dim=72,  # 5 * 8 + 32
-             cross_attention_dim=64 if not skip_first_text_encoder else 32,
-         )
-         scheduler = EulerDiscreteScheduler(
-             beta_start=0.00085,
-             beta_end=0.012,
-             steps_offset=1,
-             beta_schedule="scaled_linear",
-             timestep_spacing="leading",
-         )
-         torch.manual_seed(0)
-         vae = AutoencoderKL(
-             block_out_channels=[32, 64],
-             in_channels=3,
-             out_channels=3,
-             down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
-             up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
-             latent_channels=4,
-             sample_size=128,
-         )
-         torch.manual_seed(0)
-         text_encoder_config = CLIPTextConfig(
-             bos_token_id=0,
-             eos_token_id=2,
-             hidden_size=32,
-             intermediate_size=37,
-             layer_norm_eps=1e-05,
-             num_attention_heads=4,
-             num_hidden_layers=5,
-             pad_token_id=1,
-             vocab_size=1000,
-             # SD2-specific config below
-             hidden_act="gelu",
-             projection_dim=32,
-         )
-         text_encoder = CLIPTextModel(text_encoder_config)
-         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
- 
-         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-         tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
- 
-         components = {
-             "unet": unet,
-             "scheduler": scheduler,
-             "vae": vae,
-             "text_encoder": text_encoder if not skip_first_text_encoder else None,
-             "tokenizer": tokenizer if not skip_first_text_encoder else None,
-             "text_encoder_2": text_encoder_2,
-             "tokenizer_2": tokenizer_2,
-             "requires_aesthetics_score": True,
-         }
-         return components
- 
-     def get_dummy_inputs(self, device, seed=0):
-         # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
-         image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
-         image = image.cpu().permute(0, 2, 3, 1)[0]
-         init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
-         # create mask
-         image[8:, 8:, :] = 255
-         mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64))
- 
-         if str(device).startswith("mps"):
-             generator = torch.manual_seed(seed)
-         else:
-             generator = torch.Generator(device=device).manual_seed(seed)
-         inputs = {
-             "prompt": "A painting of a squirrel eating a burger",
-             "image": init_image,
-             "mask_image": mask_image,
-             "generator": generator,
-             "num_inference_steps": 2,
-             "guidance_scale": 6.0,
-             "output_type": "numpy",
-         }
-         return inputs
- 
-     def test_components_function(self):
-         init_components = self.get_dummy_components()
-         init_components.pop("requires_aesthetics_score")
-         pipe = self.pipeline_class(**init_components)
- 
-         self.assertTrue(hasattr(pipe, "components"))
-         self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))
- 
-     def test_stable_diffusion_xl_inpaint_euler(self):
-         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-         components = self.get_dummy_components()
-         sd_pipe = StableDiffusionXLInpaintPipeline(**components)
-         sd_pipe = sd_pipe.to(device)
-         sd_pipe.set_progress_bar_config(disable=None)
- 
-         inputs = self.get_dummy_inputs(device)
-         image = sd_pipe(**inputs).images
-         image_slice = image[0, -3:, -3:, -1]
- 
-         assert image.shape == (1, 64, 64, 3)
- 
-         expected_slice = np.array([0.8029, 0.5523, 0.5825, 0.6003, 0.6702, 0.7018, 0.6369, 0.5955, 0.5123])
- 
-         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- 
-     def test_attention_slicing_forward_pass(self):
-         super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
- 
-     def test_inference_batch_single_identical(self):
-         super().test_inference_batch_single_identical(expected_max_diff=3e-3)
- 
-     # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
-     def test_save_load_optional_components(self):
-         pass
- 
-     def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self):
-         components = self.get_dummy_components()
-         sd_pipe = StableDiffusionXLInpaintPipeline(**components)
-         sd_pipe = sd_pipe.to(torch_device)
-         sd_pipe = sd_pipe.to(torch_device)
-         sd_pipe.set_progress_bar_config(disable=None)
- 
-         # forward without prompt embeds
-         inputs = self.get_dummy_inputs(torch_device)
-         negative_prompt = 3 * ["this is a negative prompt"]
-         inputs["negative_prompt"] = negative_prompt
-         inputs["prompt"] = 3 * [inputs["prompt"]]
- 
-         output = sd_pipe(**inputs)
-         image_slice_1 = output.images[0, -3:, -3:, -1]
- 
-         # forward with prompt embeds
-         inputs = self.get_dummy_inputs(torch_device)
-         negative_prompt = 3 * ["this is a negative prompt"]
-         prompt = 3 * [inputs.pop("prompt")]
- 
-         (
-             prompt_embeds,
-             negative_prompt_embeds,
-             pooled_prompt_embeds,
-             negative_pooled_prompt_embeds,
-         ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
- 
-         output = sd_pipe(
-             **inputs,
-             prompt_embeds=prompt_embeds,
-             negative_prompt_embeds=negative_prompt_embeds,
-             pooled_prompt_embeds=pooled_prompt_embeds,
-             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-         )
-         image_slice_2 = output.images[0, -3:, -3:, -1]
- 
-         # make sure that it's equal
-         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
- 
-     @require_torch_gpu
-     def test_stable_diffusion_xl_offloads(self):
-         pipes = []
-         components = self.get_dummy_components()
-         sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipes.append(sd_pipe)
- 
-         components = self.get_dummy_components()
-         sd_pipe = StableDiffusionXLInpaintPipeline(**components)
-         sd_pipe.enable_model_cpu_offload()
-         pipes.append(sd_pipe)
- 
-         components = self.get_dummy_components()
-         sd_pipe = StableDiffusionXLInpaintPipeline(**components)
-         sd_pipe.enable_sequential_cpu_offload()
-         pipes.append(sd_pipe)
- 
-         image_slices = []
-         for pipe in pipes:
-             pipe.unet.set_default_attn_processor()
- 
-             inputs = self.get_dummy_inputs(torch_device)
-             image = pipe(**inputs).images
- 
-             image_slices.append(image[0, -3:, -3:, -1].flatten())
- 
-         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
-         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
- 
-     def test_stable_diffusion_xl_refiner(self):
-         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-         components = self.get_dummy_components(skip_first_text_encoder=True)
- 
-         sd_pipe = self.pipeline_class(**components)
-         sd_pipe = sd_pipe.to(device)
-         sd_pipe.set_progress_bar_config(disable=None)
- 
-         inputs = self.get_dummy_inputs(device)
-         image = sd_pipe(**inputs).images
-         image_slice = image[0, -3:, -3:, -1]
- 
-         assert image.shape == (1, 64, 64, 3)
- 
-         expected_slice = np.array([0.7045, 0.4838, 0.5454, 0.6270, 0.6168, 0.6717, 0.6484, 0.5681, 0.4922])
- 
-         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- 
-     def test_stable_diffusion_two_xl_mixture_of_denoiser(self):
-         components = self.get_dummy_components()
-         pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipe_1.unet.set_default_attn_processor()
-         pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipe_2.unet.set_default_attn_processor()
- 
-         def assert_run_mixture(
-             num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps
-         ):
-             inputs = self.get_dummy_inputs(torch_device)
-             inputs["num_inference_steps"] = num_steps
- 
-             class scheduler_cls(scheduler_cls_orig):
-                 pass
- 
-             pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config)
-             pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config)
- 
-             # Let's retrieve the number of timesteps we want to use
-             pipe_1.scheduler.set_timesteps(num_steps)
-             expected_steps = pipe_1.scheduler.timesteps.tolist()
- 
-             split_ts = num_train_timesteps - int(round(num_train_timesteps * split))
-             expected_steps_1 = expected_steps[:split_ts]
-             expected_steps_2 = expected_steps[split_ts:]
- 
-             expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps))
-             expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps))
- 
-             # now we monkey patch step `done_steps`
-             # list into the step function for testing
-             done_steps = []
-             old_step = copy.copy(scheduler_cls.step)
- 
-             def new_step(self, *args, **kwargs):
-                 done_steps.append(args[1].cpu().item())  # args[1] is always the passed `t`
-                 return old_step(self, *args, **kwargs)
- 
-             scheduler_cls.step = new_step
- 
-             inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}}
-             latents = pipe_1(**inputs_1).images[0]
- 
-             assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}"
- 
-             inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}}
-             pipe_2(**inputs_2).images[0]
- 
-             assert expected_steps_2 == done_steps[len(expected_steps_1) :]
-             assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}"
- 
-         for steps in [5, 8, 20]:
-             for split in [0.33, 0.49, 0.71]:
-                 for scheduler_cls in [
-                     DDIMScheduler,
-                     EulerDiscreteScheduler,
-                     DPMSolverMultistepScheduler,
-                     UniPCMultistepScheduler,
-                     HeunDiscreteScheduler,
-                 ]:
-                     assert_run_mixture(steps, split, scheduler_cls)
- 
-     def test_stable_diffusion_three_xl_mixture_of_denoiser(self):
-         components = self.get_dummy_components()
-         pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipe_1.unet.set_default_attn_processor()
-         pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipe_2.unet.set_default_attn_processor()
-         pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device)
-         pipe_3.unet.set_default_attn_processor()
- 
-         def assert_run_mixture(
-             num_steps,
-             split_1,
-             split_2,
-             scheduler_cls_orig,
-             num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps,
-         ):
-             inputs = self.get_dummy_inputs(torch_device)
-             inputs["num_inference_steps"] = num_steps
- 
-             class scheduler_cls(scheduler_cls_orig):
-                 pass
- 
-             pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config)
-             pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config)
-             pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config)
- 
-             # Let's retrieve the number of timesteps we want to use
-             pipe_1.scheduler.set_timesteps(num_steps)
-             expected_steps = pipe_1.scheduler.timesteps.tolist()
- 
-             split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1))
-             split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2))
-             expected_steps_1 = expected_steps[:split_1_ts]
-             expected_steps_2 = expected_steps[split_1_ts:split_2_ts]
-             expected_steps_3 = expected_steps[split_2_ts:]
- 
-             expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps))
-             expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps))
-             expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps))
- 
-             # now we monkey patch step `done_steps`
-             # list into the step function for testing
-             done_steps = []
-             old_step = copy.copy(scheduler_cls.step)
- 
-             def new_step(self, *args, **kwargs):
-                 done_steps.append(args[1].cpu().item())  # args[1] is always the passed `t`
-                 return old_step(self, *args, **kwargs)
- 
-             scheduler_cls.step = new_step
- 
-             inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
-             latents = pipe_1(**inputs_1).images[0]
- 
-             assert (
-                 expected_steps_1 == done_steps
-             ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
- 
-             inputs_2 = {
-                 **inputs,
-                 **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"},
-             }
-             pipe_2(**inputs_2).images[0]
- 
-             assert expected_steps_2 == done_steps[len(expected_steps_1) :]
- 
-             inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}}
-             pipe_3(**inputs_3).images[0]
- 
-             assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-             assert (
-                 expected_steps == done_steps
-             ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
- 
-         for steps in [7, 11, 20]:
-             for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
-                 for scheduler_cls in [
-                     DDIMScheduler,
-                     EulerDiscreteScheduler,
-                     DPMSolverMultistepScheduler,
-                     UniPCMultistepScheduler,
-                     HeunDiscreteScheduler,
-                 ]:
-                     assert_run_mixture(steps, split_1, split_2, scheduler_cls)
- 
-     def test_stable_diffusion_xl_multi_prompts(self):
-         components = self.get_dummy_components()
-         sd_pipe = self.pipeline_class(**components).to(torch_device)
- 
-         # forward with single prompt
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         output = sd_pipe(**inputs)
-         image_slice_1 = output.images[0, -3:, -3:, -1]
- 
-         # forward with same prompt duplicated
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         inputs["prompt_2"] = inputs["prompt"]
-         output = sd_pipe(**inputs)
-         image_slice_2 = output.images[0, -3:, -3:, -1]
- 
-         # ensure the results are equal
-         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
- 
-         # forward with different prompt
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         inputs["prompt_2"] = "different prompt"
-         output = sd_pipe(**inputs)
-         image_slice_3 = output.images[0, -3:, -3:, -1]
- 
-         # ensure the results are not equal
-         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
- 
-         # manually set a negative_prompt
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         inputs["negative_prompt"] = "negative prompt"
-         output = sd_pipe(**inputs)
-         image_slice_1 = output.images[0, -3:, -3:, -1]
- 
-         # forward with same negative_prompt duplicated
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         inputs["negative_prompt"] = "negative prompt"
-         inputs["negative_prompt_2"] = inputs["negative_prompt"]
-         output = sd_pipe(**inputs)
-         image_slice_2 = output.images[0, -3:, -3:, -1]
- 
-         # ensure the results are equal
-         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
- 
-         # forward with different negative_prompt
-         inputs = self.get_dummy_inputs(torch_device)
-         inputs["num_inference_steps"] = 5
-         inputs["negative_prompt"] = "negative prompt"
-         inputs["negative_prompt_2"] = "different negative prompt"
-         output = sd_pipe(**inputs)
-         image_slice_3 = output.images[0, -3:, -3:, -1]
- 
-         # ensure the results are not equal
-         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
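
The two mixture-of-denoiser tests above hinge on one piece of arithmetic: a fractional split is mapped to an absolute timestep threshold, `denoising_end=split` makes the first pipeline stop there, and `denoising_start=split` makes the second resume there, so together they must visit every timestep exactly once. A standalone sketch of that partition; the concrete timestep values are illustrative, assuming "leading" spacing with steps_offset=1 as configured in get_dummy_components:

```python
num_train_timesteps = 1000
num_steps, split = 5, 0.33

# The tests map the fractional split to an absolute timestep threshold:
split_ts = num_train_timesteps - int(round(num_train_timesteps * split))  # 670

# Illustrative timesteps for 5 inference steps over 1000 training steps:
timesteps = [801, 601, 401, 201, 1]

stage_1 = [t for t in timesteps if t >= split_ts]  # run with denoising_end=split
stage_2 = [t for t in timesteps if t < split_ts]   # run with denoising_start=split
assert stage_1 + stage_2 == timesteps  # every step is visited exactly once
```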
 
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py DELETED
@@ -1,59 +0,0 @@
- _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_64x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=64,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch'))
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='Resize',
-         img_scale=[(1333, 640), (1333, 800)],
-         multiscale_mode='value',
-         keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     samples_per_gpu=2,
-     workers_per_gpu=2,
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- # optimizer
- optimizer = dict(
-     lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
- optimizer_config = dict(
-     _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
- # learning policy
- lr_config = dict(step=[16, 22])
- runner = dict(type='EpochBasedRunner', max_epochs=24)
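
In the train pipeline above, `multiscale_mode='value'` makes the Resize transform sample one of the listed scales per image, rather than interpolating between two endpoints as `'range'` mode does. A toy sketch of that sampling behaviour, not mmdetection's actual implementation:

```python
import random

# Two candidate scales from the config above; 'value' mode picks one
# whole (long_edge, short_edge) pair for each training image.
img_scales = [(1333, 640), (1333, 800)]
scale = random.choice(img_scales)
```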
 
spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
- 
- # learning policy
- lr_config = dict(step=[28, 34])
- runner = dict(type='EpochBasedRunner', max_epochs=36)
 
spaces/AngoHF/ANGO-Leaderboard/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ANGO Benchmark
- emoji: 🏆
- colorFrom: yellow
- colorTo: purple
- sdk: gradio
- sdk_version: 3.44.4
- app_file: app.py
- pinned: false
- license: llama2
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AngoHF/ANGO-Leaderboard/assets/content.py DELETED
@@ -1,163 +0,0 @@
- TITLE = """<h1 align="center" id="space-title">🧊🌊ANGO Leaderboard</h1>"""
- INTRODUCTION_TEXT = """
- 
- ANGO is <b>A</b> <b>N</b>ovel <b>G</b>eneration-<b>O</b>riented Chinese LLM evaluation benchmark.
- 
- We introduce the single-question multiple-keypoints dataset format for the first time, which includes 171 keypoints accumulated across 4 hierarchical levels and 9 difficulty categories.
- 
- 
- The data were obtained exclusively from the Administrative Proficiency Test,
- which serves as a significant component of the Chinese civil service examination.
- 
- 
- We will apply a seasonal system for the leaderboard, updating it every two months.
- The corresponding test dataset will be announced at the beginning of each season,
- and some questions will be eliminated at the end of the season.
- 
- 
- Read more details on the "About" page!
- """
- QUESTION_TEXT = r"""
- For details about Wrong Hit & Wrong Value, please see the "About" page
- """
- 
- KEYPOINT_TEXT = """
- Because a single question may contain more than one keypoint, the total keypoint count is higher than the question count
- """
- KEYPOINT_DISTRIBUTION = """{"data":[{"branchvalues":"total","insidetextorientation":"radial","labels":["关联词-转折","关联词-因果","关联词-对策","关联词-并列","主题词","程度词","行文脉络-总分","行文脉络-分总","行文脉络-分总分","特殊问法","实词","代词","首句特征","非首句特征","确定捆绑","确定顺序","尾句特征","开头","中间","结尾","词的辨析-词义侧重","词的辨析-固定搭配","词的辨析-感情色彩","词的辨析-程度轻重","关联关系-转折关系","关联关系-因果关系","关联关系-并列关系","对应关系-解释类对应","对应关系-重点词句对应","给完工时间型","给效率比例型","给具体单位型","工程问题-其他","非典型最值问题","构造数列","最不利构造","多集合反向构造","周期相遇问题","周期余数问题","周期问题-其他","火车过桥","平均速度","普通行程","相遇追及","流水行船","行程问题-其他","平面几何","立体几何","两集合","三集合","基础排列组合","相邻问题","不相邻问题","同素分堆问题","环形排列问题","错位排列","排列组合问题-其他","给情况求概率","给概率求概率","概率问题-其他","普通不定方程","不定方程组","主客体","大前提","方式目的","原因结果","单定义-其他句式","故事类","拆词","常规问法","搭桥","必要条件","补充论据","加强选非题","加强-其他","削弱论点","拆桥","他因削弱","削弱选非题","削弱论据","因果倒置","削弱-其他","常规翻译","集合推理","推理形式","翻译推理-其他","语义关系-近义关系","语义关系-反义关系","语义-其他","逻辑关系-全同关系","逻辑关系-并列关系","逻辑关系-交叉关系","逻辑关系-包容关系","逻辑关系-对应关系","中心理解题","细节判断题","词句理解题","标题填入题","语句排序题","语句填空题","接语选择题","实词填空","成语填空","混搭填空","词的辨析","语境分析","工程问题","最值问题","年龄问题","和差倍比问题","周期问题","数列问题","行程问题","几何问题","容斥原理问题","排列组合问题","概率问题","经济利润问题","不定方程问题","统筹规划问题","数学运算-其他","公倍数与公约数问题","单定义","多定义","加强题型","削弱题型","翻译推理","组合排列-材料","原因解释","语义关系","逻辑关系","拆分思维","直接找数","简单加减计算","排序类","基期计算","现期计算","基期比较","间隔基期","基期和差","现期追赶","一般增长率","混合增长率","间隔增长率","年均增长率","增长量计算","增长量比较","间隔增长量","年均增长量","现期比重","基期比重","两期比重","混合比重","基期平均数","现期平均数","平均数的增长率","平均数的增长量","两期平均数比较","基期倍数","现期倍数","比值计算","比值比较","时政","中国特色社会主义建设","宏观经济与调控政策","物理常识","化学常识","生物常识","科技理论与成就","生活常识","中国历史","世界历史","文学常识","文化常识","自然常识","国情社情","宪法","行政法","民法","刑法","劳动法","其他法律法规","民事诉讼法","经济法","阅读理解","语句表达","逻辑填空","数学运算","定义判断","逻辑判断","类比推理","文字资料","综合资料","简单计算","基期与现期","增长率","增长量","比重问题","平均数问题","倍数与比值相关","综合分析","政治常识","经济常识","科技常识","人文常识","地理国情","法律常识","未分类","言语理解与表达","数量关系","判断推理","资料分析","常识判断"],"marker":{"colors":["#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#B22222","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC6600","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#CC9900","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE",
"#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#B22222","#B22222","#B22222","#CC6600","#CC9900","#CC9900","#CC9900","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#228B22","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#0077BE","#9400D3","#B22222","#CC6600","#CC9900","#228B22","#0077BE"]},"parents":["中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","中心理解题","词句理解题","词句理解题","语句排序题","语句排序题","语句排序题","语句排序题","语句排序题","语句填空题","语句填空题","语句填空题","词的辨析","词的辨析","词的辨析","词的辨析","语境分析","语境分析","语境分析","语境分析","语境分析","工程问题","工程问题","工程问题","工程问题","最值问题","最值问题","最值问题","最值问题","周期问题","周期问题","周期问题","行程问题","行程问题","行程问题","行程问题","行程问题","行程问题","几何问题","几何问题","容斥原理问题","容斥原理问题","排列组合问题","排列组合问题","排列组合问题","排列组合问题","排列组合问题","排列组合问题","排列组合问题","概率问题","概率问题","概率问题","不定方程问题","不定方程问题","单定义","单定义","单定义","单定义","单定义","单定义","单定义","多定义","加强题型","加强题型","加强题型","加强题型","加强题型","削弱题型","削弱题型","削弱题型","削弱题型","削弱题型","削弱题型","削弱题型","翻译推理","翻译推理","翻译推理","翻译推理","语义关系","语义关系","语义关系","逻辑关系","逻辑关系","逻辑关系","逻辑关系","逻辑关系","阅读理解","阅读理解","阅读理解","阅读理解","语句表达","语句表达","语句表达","逻辑填空","逻辑填空","逻辑填空","逻辑填空","逻辑填空","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","数学运算","定义判断","定义判断","逻辑判断","逻辑判断","逻辑判断","逻辑判断","逻辑判断","类比推理","类比推理","类比推理","简单计算","简单计算","简单计算","基期与现期","基期与现期","基期与现期","基期与现期","基期与现期","基期与现期","增长率","增长率","增长率","增长率","增长量","增长量","增长量","增长量","比重问题","比重问题","比重问题","比重问题","平均数问题","平均数问题","平均数问题","平均数问题","平均数问题","倍数与比值相关","倍数与比值相关","倍数与比值相关","倍数与比值相关","政治常识","政治常识","经济常识","科技常识","科技常识","科技常识","科技常识","科技常识","人文常识","人文常识","人文常识","人文常识","地理国情","地理国情","法律常识","法律常识","法律常识","法律常识","法律常识","法律常识","法律常识","法律常识","言语理解与表达","言语理解与表达","言语理解与表达","数量关系","判断推理","判断推理","判断推理","资料分析","资料分析","资料分析","资料分析","资料分析","资料分析","资料分析","资���分析","资料分析","资料分析","常识判断","常识判断","常识判断","常识判断","常识判断","常识判断","","","","","",""],"values":[892,340,1028,634,1029,211,649,1130,409,629,193,153,110,139,659,560,38,234,417,295,1116,3837,801,808,662,378,1371,2173,4832,162,203,149,51,339,154,111,20,80,103,32,22,38,211,322,75,14,230,183,124,157,373,51,41,29,16,18,23,304,108,36,125,126,266,433,1148,521,1300,118,209,525,582,308,598,220,8,708,226,110,155,90,81,5,708,133,325,36,210,178,117,113,761,278,873,2087,6957,2221,346,465,1506,946,750,3340,2396,2474,6562,9416,565,624,169,1063,215,216,682,413,281,551,448,565,251,163,19,63,3995,525,1716,1375,1202,708,525,505,4112,240,105,118,52,152,24,18,22,61,7,147,50,41,2,113,34,4,2,244,120,91,2,35,94,53,7,3,50,64,32,1,3751,247,433,614,362,687,627,631,737,124,916,1087,568,629,347,669,513,309,75,641,69,105,9989,3202,24188,6288,4520,5526,4857,2168,1,275,284,240,153,457,192,147,441,3999,435,2921,2866,1198,2728,15907,37379,6288,14903,4358,14147],"type":"sunburst"}],"layout":{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"
#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"soli
dity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"}}}}}"""
28
- DIFFICULTY_DISTRIBUTION = """{"data":[{"marker":{"color":[24,130,9283,18231,23734,10120,9546,69,12],"colorbar":{"title":{"text":"Total"}},"colorscale":[[0.0,"#440154"],[0.1111111111111111,"#482878"],[0.2222222222222222,"#3e4989"],[0.3333333333333333,"#31688e"],[0.4444444444444444,"#26828e"],[0.5555555555555556,"#1f9e89"],[0.6666666666666666,"#35b779"],[0.7777777777777778,"#6ece58"],[0.8888888888888888,"#b5de2b"],[1.0,"#fde725"]]},"x":[1,2,3,4,5,6,7,8,9],"y":[24,130,9283,18231,23734,10120,9546,69,12],"type":"bar"}],"layout":{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidt
h":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks
":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"}}},"yaxis":{"type":"log"}}}"""
29
- TEST_SET_TEXT = """
30
- The test set comprises a total of 1768 records.
31
-
32
- Among these records, there are 988 distinct combinations of Keypoints, which means an additional 988 * 5 few-shot examples are provided.
33
-
34
- The test set encompasses all 171 Keypoint categories.
35
-
36
- If you want to use the HuggingFace dataset, see [ANGO Dataset](https://huggingface.co/datasets/AngoHF/ANGO-S1).
37
- For more details, please refer to the "About" page.
38
- """
39
- TEST_SCRIPT_TEXT = """
40
- <br>
41
- The evaluation script requires three mandatory arguments; the remaining arguments should be left unchanged.
42
-
43
- --model_path: specifies the location where the model parameters are saved.
44
- --dataset_path: indicates the directory where the ANGO test set data is stored.
45
-
46
- --save_path: denotes the path where the evaluation results will be saved.
47
-
48
- You can modify the specific functions to adapt them to your model.
49
-
50
- <br>
51
- Upon completion of the evaluation, the script will generate three files:
52
-
53
- acc_result: This file contains the predicted results for each record, along with statistical data at the question level.
54
-
55
- category_result: This file provides statistical data at the Keypoint level.
56
-
57
- difficulty_result: This file includes statistical data categorized by difficulty level.
58
- """
59
- SUBMIT_TEXT = """
60
- You can raise a PR in this Space to submit your results, and we will update the leaderboard manually after review.
61
- """
62
-
63
- ABOUT_HTML = """
64
- <h1>What is ANGO</h1>
65
- <p>We introduce ANGO, a novel Chinese LLM benchmark dataset that aims to provide more in-depth guidance for model training and evaluation. We introduce, for the first time, a single-question multiple-keypoints format, which provides the most complete description of each question and enables the test results to comprehensively showcase a model's performance from multiple perspectives. Building on this format, we design a more detailed and refined model capability taxonomy, the Keypoint Tree, which reflects the relationships between different keypoints and covers a total of 171 specific model capabilities organized into 4 hierarchical levels. With the help of the Keypoint Tree, a model's performance across multiple levels of capability can be quickly measured and corresponding adjustments can be made. ANGO also introduces two new question attributes: human accuracy and human error-prone options. Based on human accuracy, we propose a finer-grained difficulty classification than previous benchmarks: by combining the human accuracy of the question itself, the human accuracy of the involved keypoints, and the actual score of the question, all questions are divided into 9 difficulty levels, providing a quantifiable reference for evaluating models at different difficulty levels.</p>
66
-
67
- <p>In addition to the innovative data, we propose a complete set of verification processes tailored for ANGO, which can provide fairer results compared to the current leaderboards. This includes conducting multiple experiments with option shuffling to mitigate the issue of data leakage, designing test set sampling strategies that fully utilize the characteristics of ANGO, and implementing elimination mechanisms for high-accuracy questions. Based on these, we establish a dynamic updating system for the test set, resembling a seasonal system. Thanks to these methods, ANGO can continually update the test results, ensuring the fairness and effectiveness of the leaderboard. By preserving the test results from multiple seasons, it can provide researchers with an overview of the current trends in optimizing models within the community.</p>
68
-
69
- <h1 id="space-title">Data Source</h1>
70
- <p>The data utilized in our study were exclusively obtained from the Administrative Proficiency Test, which serves as a significant component of the Chinese civil service examination.</p>
71
- <p>The Administrative Proficiency Test is entirely composed of multiple-choice questions and aims to evaluate the abilities and skills necessary for practical administrative work. The test covers a wide range of knowledge areas, including Expression & Comprehension, Data Analysis, Quantitative Relations, Judgement & Inference, and Common Knowledge. As a comprehensive assessment tool, it requires candidates to respond to a series of questions related to administrative work within a limited timeframe. These questions may involve policy formulation, problem-solving, personnel and resource management, as well as handling emergency situations, and thereby facilitate the evaluation of candidates' analytical thinking, judgement and inference, problem-solving abilities, and language proficiency.</p>
72
- <p>The nature of the Administrative Proficiency Test requires candidates to tackle complex questions within a specified timeframe, making it an ideal testing environment for assessing the language capabilities of language models. Language models typically demonstrate excellent performance in generating and comprehending text, and this test provides concrete and intricate contexts that simulate real-world language communication and decision-making processes. By employing language models to answer these questions, we can evaluate their understanding of complex problems, their judgement and inference abilities, and the accuracy and fluency of their language expression.</p>
73
- <p>Furthermore, the Administrative Proficiency Test offers broad coverage and diversity. It includes questions and scenarios from various administrative domains, such as government administration, social affairs, and economic development. This diversity aids in evaluating the language processing abilities of language models across different fields, thereby providing a more comprehensive understanding of their potential strengths and limitations in practical applications. Moreover, it offers valuable insights for future model improvements and applications.</p>
74
- <p>ANGO's data covers all 34 provinces in China and includes three different types of examinations conducted between 2008 and 2023, including formal and mock exams.</p>
75
- <h1 id="space-title">Data Processing</h1>
76
- <p>In order to enhance the quality of our data, we employed a simple yet efficient preprocessing approach.</p>
77
- <h4>Duplicate Removal</h4>
78
- <p>Given that mock exams often include previous exam questions, our data contained numerous duplicates. To address this issue, we employed a straightforward strategy of removing duplicates based on the record ID obtained from the data source. As a result of this step, the size of our data was reduced to 88,799 instances.</p>
79
-
80
- <h4>Image Removal</h4>
81
- <p>The data consisted of two types of images: formula pictures and other types (such as images containing graphics). However, since our primary focus was on Chinese Natural Language Processing (NLP) evaluation rather than the multi-modal domain, we opted to remove all records containing pure images. This resulted in the removal of 17,650 records.</p>
82
-
83
- <h4>Formula Replacement</h4>
84
- <p>As mentioned earlier, our data still contained formula pictures, and we recognized the importance of including formulae to ensure diversity in our data. To address this, we extracted 8,144 unique formula images from a pool of 34,062 LaTeX formulas derived from 5,574 questions. These images were then processed using a Formula OCR (Optical Character Recognition) model, followed by manual verification to ensure formula accuracy. Ultimately, we obtained a clean dataset consisting of 71,149 instances.</p>
85
- <h1 id="space-title">Data Format</h1>
86
-
87
- <ul>
88
- <li><strong>Question:</strong> The content of the question.</li>
89
- <li><strong>Material:</strong> Some questions require additional information from a given material.</li>
90
- <li><strong>Type:</strong> The classification of the question, encompassing single-choice and multiple-choice formats.</li>
91
- <li><strong>Options:</strong> The candidate answers, presented in a line-separated format.</li>
92
- <li><strong>Choice:</strong> The correct answer to the question.</li>
93
- <li><strong>Keypoints:</strong> All the keypoints involved in the question.</li>
94
- <li><strong>Human Accuracy:</strong> The accuracy of humans on this question.</li>
95
- <li><strong>Human Count:</strong> The number of times this question has been completed by humans.</li>
96
- <li><strong>Most Wrong:</strong> The option that humans are most likely to choose incorrectly.</li>
97
- <li><strong>Difficulty:</strong> The level of difficulty of the question, given by our standard.</li>
98
- <li><strong>Solution:</strong> A concise explanation of the methodology to arrive at the correct answer.</li>
99
- <li><strong>Source:</strong> The original index and examination source of the question.</li>
100
- <li><strong>Formulas:</strong> The count of formulas present in the material, question, and options.</li>
101
- </ul>
102
-
103
- <p>Here is an example record:</p>
104
-
105
- <div style="border: 1px solid black; padding: 10px;">
106
- <p>
107
- <strong>Question:</strong> Forward: Backward<br>
108
- <strong>Material:</strong> Please select the option that best resembles the relationship between the given words or phrases in the question stem.<br>
109
- <strong>Type:</strong> Single Choice<br>
110
- <strong>Options:</strong><br>
111
- A. Urge: Advise<br>
112
- B. Ocean: Land<br>
113
- C. Vibration: Quiet<br>
114
- D. Extend: Compress<br>
115
- <strong>Choice:</strong> D<br>
116
- <strong>Difficulty:</strong> 4<br>
117
- <strong>KeyPoints:</strong> Semantic Relationship - Antonym<br>
118
- <strong>Human Accuracy:</strong> 79.564999<br>
119
- <strong>Human Count:</strong> 183494<br>
120
- <strong>Most Wrong:</strong> C<br>
121
- <strong>Solution:</strong> Step 1: Determine the logical relationship between the words in the question stem. The two words in the question stem are antonyms. Step 2: Determine the logical relationship between the options. The option that has the same logical relationship as the question stem is option D. Option A is a synonym relationship, option B is a parallel relationship, and in option C, the antonym of "quiet" should be "noisy" instead of "vibration". Therefore, the correct answer is D.<br>
122
- <strong>Source:</strong> 2011 Jiangsu Province Civil Service Recruitment Examination 'Administrative Aptitude Test' (Category A), Question 41<br>
123
- <strong>Formulas:</strong> 0
124
- </p>
125
- </div>
126
-
127
- <h1 id="space-title">Wrong Hit & Wrong Value</h1>
128
- <p>There are two special attributes in ANGO:</p>
129
-
130
- <ul>
131
- <li>
132
- <strong>Human Acc:</strong> Refers to the accuracy of humans on this question.
133
- </li>
134
- <li>
135
- <strong>Most Wrong:</strong> Represents the option that humans are prone to get wrong.
136
- </li>
137
- </ul>
138
-
139
- <p>So based on these two attributes, we have derived two new metrics for evaluation:</p>
140
-
141
- <ul>
142
- <li>
143
- <strong>Wrong Hit:</strong> Refers to the number of times the model's incorrect predictions match the options that humans are prone to get wrong.
144
- </li>
145
- <li>
146
- <strong>Wrong Value:</strong> Calculated as 1 minus the average human accuracy over all the questions counted in Wrong Hit.
147
- </li>
148
- </ul>
149
-
150
- <p>Wrong Hit and Wrong Value do not measure the model's ability to solve the problem; rather, they indicate, to some extent, how similar the model is to real humans. Due to intentional misdirection or design errors in the questions, humans often make the same mistake in large numbers. In such cases, if the model's predicted answer matches the widespread human error, it suggests that the model's way of thinking is closer to that of the majority of ordinary humans.</p>
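A minimal computation sketch for the two metrics as defined above; the record field names (prediction, choice, most_wrong, human_acc) are assumptions, not taken from the actual evaluation script, and human_acc is assumed to be normalized to [0, 1].

def wrong_hit_and_value(records):
    # Wrong Hit: incorrect predictions that land on the human error-prone option.
    hits = [r for r in records
            if r["prediction"] != r["choice"] and r["prediction"] == r["most_wrong"]]
    wrong_hit = len(hits)
    # Wrong Value: 1 minus the mean human accuracy over the Wrong Hit questions.
    if wrong_hit:
        wrong_value = 1 - sum(r["human_acc"] for r in hits) / wrong_hit
    else:
        wrong_value = 0.0
    return wrong_hit, wrong_value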
151
-
152
- <h1 id="space-title">Evaluation(Not Implement Yet)</h1>
153
- <p>To mitigate the impact of data leakage during model pretraining on benchmark evaluations, we employ several evaluation techniques to enhance the fairness and timeliness of the benchmark.</p>
154
-
155
- <h4>Confusion of Options Order</h4>
156
- <p>Sometimes, a model answers a specific question correctly not because it has mastered a certain ability or understood the question, but because it has recognized patterns of token order in the training data. By shuffling the order of the options in multiple-choice questions and making multiple predictions with the correct answer placed at different positions, we can average the results to reduce the model's reliance on option order.</p>
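A sketch of the shuffle-and-average idea under an assumed model interface; model_predict(question, options) -> predicted option index is an assumption, not the repository's actual API.

import random

def shuffled_accuracy(model_predict, question, options, answer_idx, n_rounds=4):
    # Average accuracy over several random option orderings.
    correct = 0
    for _ in range(n_rounds):
        order = list(range(len(options)))
        random.shuffle(order)                    # move the correct answer around
        shuffled = [options[i] for i in order]
        pred = model_predict(question, shuffled)
        correct += order[pred] == answer_idx     # map the prediction back to the original index
    return correct / n_rounds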
157
-
158
- <h4>Season For Dynamic Evaluation</h4>
159
- <p>Thanks to sampling strategies optimized for ANGO, we can periodically resample the test set and update the leaderboard. This prevents institutions or individuals from maliciously hacking ANGO to inflate a model's performance. However, due to the limited number of questions in some key areas, dynamic iteration may not be feasible for all questions.</p>
160
-
161
- <h4>Question Elimination Mechanism</h4>
162
- <p>In addition to the dynamic season updates described above, we propose a question elimination mechanism. For each iteration, this mechanism calculates the average accuracy of each question across all models; questions whose accuracy exceeds a threshold are temporarily removed so that the remaining questions in ANGO retain reliable discriminative power.</p>
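A sketch of the elimination step; the 0.95 threshold and the input layout are illustrative assumptions, not documented values.

def surviving_questions(acc_by_model, threshold=0.95):
    # acc_by_model: {question_id: [accuracy of model 1, accuracy of model 2, ...]}
    # Keep only questions whose mean accuracy across models stays at or below the threshold.
    return {qid for qid, accs in acc_by_model.items()
            if sum(accs) / len(accs) <= threshold}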
163
- """
 
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/nn.py DELETED
@@ -1,170 +0,0 @@
1
- """
2
- Various utilities for neural networks.
3
- """
4
-
5
- import math
6
-
7
- import torch as th
8
- import torch.nn as nn
9
-
10
-
11
- # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
12
- class SiLU(nn.Module):
13
- def forward(self, x):
14
- return x * th.sigmoid(x)
15
-
16
-
17
- class GroupNorm32(nn.GroupNorm):
18
- def forward(self, x):
19
- return super().forward(x.float()).type(x.dtype)
20
-
21
-
22
- def conv_nd(dims, *args, **kwargs):
23
- """
24
- Create a 1D, 2D, or 3D convolution module.
25
- """
26
- if dims == 1:
27
- return nn.Conv1d(*args, **kwargs)
28
- elif dims == 2:
29
- return nn.Conv2d(*args, **kwargs)
30
- elif dims == 3:
31
- return nn.Conv3d(*args, **kwargs)
32
- raise ValueError(f"unsupported dimensions: {dims}")
33
-
34
-
35
- def linear(*args, **kwargs):
36
- """
37
- Create a linear module.
38
- """
39
- return nn.Linear(*args, **kwargs)
40
-
41
-
42
- def avg_pool_nd(dims, *args, **kwargs):
43
- """
44
- Create a 1D, 2D, or 3D average pooling module.
45
- """
46
- if dims == 1:
47
- return nn.AvgPool1d(*args, **kwargs)
48
- elif dims == 2:
49
- return nn.AvgPool2d(*args, **kwargs)
50
- elif dims == 3:
51
- return nn.AvgPool3d(*args, **kwargs)
52
- raise ValueError(f"unsupported dimensions: {dims}")
53
-
54
-
55
- def update_ema(target_params, source_params, rate=0.99):
56
- """
57
- Update target parameters to be closer to those of source parameters using
58
- an exponential moving average.
59
-
60
- :param target_params: the target parameter sequence.
61
- :param source_params: the source parameter sequence.
62
- :param rate: the EMA rate (closer to 1 means slower).
63
- """
64
- for targ, src in zip(target_params, source_params):
65
- targ.detach().mul_(rate).add_(src, alpha=1 - rate)
66
-
67
-
68
- def zero_module(module):
69
- """
70
- Zero out the parameters of a module and return it.
71
- """
72
- for p in module.parameters():
73
- p.detach().zero_()
74
- return module
75
-
76
-
77
- def scale_module(module, scale):
78
- """
79
- Scale the parameters of a module and return it.
80
- """
81
- for p in module.parameters():
82
- p.detach().mul_(scale)
83
- return module
84
-
85
-
86
- def mean_flat(tensor):
87
- """
88
- Take the mean over all non-batch dimensions.
89
- """
90
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
91
-
92
-
93
- def normalization(channels):
94
- """
95
- Make a standard normalization layer.
96
-
97
- :param channels: number of input channels.
98
- :return: an nn.Module for normalization.
99
- """
100
- return GroupNorm32(32, channels)
101
-
102
-
103
- def timestep_embedding(timesteps, dim, max_period=10000):
104
- """
105
- Create sinusoidal timestep embeddings.
106
-
107
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
108
- These may be fractional.
109
- :param dim: the dimension of the output.
110
- :param max_period: controls the minimum frequency of the embeddings.
111
- :return: an [N x dim] Tensor of positional embeddings.
112
- """
113
- half = dim // 2
114
- freqs = th.exp(
115
- -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
116
- ).to(device=timesteps.device)
117
- args = timesteps[:, None].float() * freqs[None]
118
- embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
119
- if dim % 2:
120
- embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
121
- return embedding
122
-
123
-
124
- def checkpoint(func, inputs, params, flag):
125
- """
126
- Evaluate a function without caching intermediate activations, allowing for
127
- reduced memory at the expense of extra compute in the backward pass.
128
-
129
- :param func: the function to evaluate.
130
- :param inputs: the argument sequence to pass to `func`.
131
- :param params: a sequence of parameters `func` depends on but does not
132
- explicitly take as arguments.
133
- :param flag: if False, disable gradient checkpointing.
134
- """
135
- if flag:
136
- args = tuple(inputs) + tuple(params)
137
- return CheckpointFunction.apply(func, len(inputs), *args)
138
- else:
139
- return func(*inputs)
140
-
141
-
142
- class CheckpointFunction(th.autograd.Function):
143
- @staticmethod
144
- def forward(ctx, run_function, length, *args):
145
- ctx.run_function = run_function
146
- ctx.input_tensors = list(args[:length])
147
- ctx.input_params = list(args[length:])
148
- with th.no_grad():
149
- output_tensors = ctx.run_function(*ctx.input_tensors)
150
- return output_tensors
151
-
152
- @staticmethod
153
- def backward(ctx, *output_grads):
154
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
155
- with th.enable_grad():
156
- # Fixes a bug where the first op in run_function modifies the
157
- # Tensor storage in place, which is not allowed for detach()'d
158
- # Tensors.
159
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
160
- output_tensors = ctx.run_function(*shallow_copies)
161
- input_grads = th.autograd.grad(
162
- output_tensors,
163
- ctx.input_tensors + ctx.input_params,
164
- output_grads,
165
- allow_unused=True,
166
- )
167
- del ctx.input_tensors
168
- del ctx.input_params
169
- del output_tensors
170
- return (None, None) + input_grads
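A small usage sketch for the helpers above; the shapes are illustrative, and ema_model / model are placeholders for two modules with matching parameters.

import torch as th

t = th.arange(4)                      # 4 timestep indices
emb = timestep_embedding(t, dim=128)  # sinusoidal embeddings of shape [4, 128]

# Nudge EMA parameters toward the live model's parameters:
# update_ema(ema_model.parameters(), model.parameters(), rate=0.999)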
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/api.py DELETED
@@ -1,169 +0,0 @@
1
- # based on https://github.com/isl-org/MiDaS
2
-
3
- import cv2
4
- import os
5
- import torch
6
- import torch.nn as nn
7
- from torchvision.transforms import Compose
8
-
9
- from .midas.dpt_depth import DPTDepthModel
10
- from .midas.midas_net import MidasNet
11
- from .midas.midas_net_custom import MidasNet_small
12
- from .midas.transforms import Resize, NormalizeImage, PrepareForNet
13
- from annotator.util import annotator_ckpts_path
14
-
15
-
16
- ISL_PATHS = {
17
- "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
18
- "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
19
- "midas_v21": "",
20
- "midas_v21_small": "",
21
- }
22
-
23
- remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
24
-
25
-
26
- def disabled_train(self, mode=True):
27
- """Overwrite model.train with this function to make sure train/eval mode
28
- does not change anymore."""
29
- return self
30
-
31
-
32
- def load_midas_transform(model_type):
33
- # https://github.com/isl-org/MiDaS/blob/master/run.py
34
- # load transform only
35
- if model_type == "dpt_large": # DPT-Large
36
- net_w, net_h = 384, 384
37
- resize_mode = "minimal"
38
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
39
-
40
- elif model_type == "dpt_hybrid": # DPT-Hybrid
41
- net_w, net_h = 384, 384
42
- resize_mode = "minimal"
43
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
44
-
45
- elif model_type == "midas_v21":
46
- net_w, net_h = 384, 384
47
- resize_mode = "upper_bound"
48
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
49
-
50
- elif model_type == "midas_v21_small":
51
- net_w, net_h = 256, 256
52
- resize_mode = "upper_bound"
53
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
54
-
55
- else:
56
- assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
57
-
58
- transform = Compose(
59
- [
60
- Resize(
61
- net_w,
62
- net_h,
63
- resize_target=None,
64
- keep_aspect_ratio=True,
65
- ensure_multiple_of=32,
66
- resize_method=resize_mode,
67
- image_interpolation_method=cv2.INTER_CUBIC,
68
- ),
69
- normalization,
70
- PrepareForNet(),
71
- ]
72
- )
73
-
74
- return transform
75
-
76
-
77
- def load_model(model_type):
78
- # https://github.com/isl-org/MiDaS/blob/master/run.py
79
- # load network
80
- model_path = ISL_PATHS[model_type]
81
- if model_type == "dpt_large": # DPT-Large
82
- model = DPTDepthModel(
83
- path=model_path,
84
- backbone="vitl16_384",
85
- non_negative=True,
86
- )
87
- net_w, net_h = 384, 384
88
- resize_mode = "minimal"
89
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
90
-
91
- elif model_type == "dpt_hybrid": # DPT-Hybrid
92
- if not os.path.exists(model_path):
93
- from basicsr.utils.download_util import load_file_from_url
94
- load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
95
-
96
- model = DPTDepthModel(
97
- path=model_path,
98
- backbone="vitb_rn50_384",
99
- non_negative=True,
100
- )
101
- net_w, net_h = 384, 384
102
- resize_mode = "minimal"
103
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
104
-
105
- elif model_type == "midas_v21":
106
- model = MidasNet(model_path, non_negative=True)
107
- net_w, net_h = 384, 384
108
- resize_mode = "upper_bound"
109
- normalization = NormalizeImage(
110
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
111
- )
112
-
113
- elif model_type == "midas_v21_small":
114
- model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
115
- non_negative=True, blocks={'expand': True})
116
- net_w, net_h = 256, 256
117
- resize_mode = "upper_bound"
118
- normalization = NormalizeImage(
119
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
120
- )
121
-
122
- else:
123
- print(f"model_type '{model_type}' not implemented, use: --model_type large")
124
- assert False
125
-
126
- transform = Compose(
127
- [
128
- Resize(
129
- net_w,
130
- net_h,
131
- resize_target=None,
132
- keep_aspect_ratio=True,
133
- ensure_multiple_of=32,
134
- resize_method=resize_mode,
135
- image_interpolation_method=cv2.INTER_CUBIC,
136
- ),
137
- normalization,
138
- PrepareForNet(),
139
- ]
140
- )
141
-
142
- return model.eval(), transform
143
-
144
-
145
- class MiDaSInference(nn.Module):
146
- MODEL_TYPES_TORCH_HUB = [
147
- "DPT_Large",
148
- "DPT_Hybrid",
149
- "MiDaS_small"
150
- ]
151
- MODEL_TYPES_ISL = [
152
- "dpt_large",
153
- "dpt_hybrid",
154
- "midas_v21",
155
- "midas_v21_small",
156
- ]
157
-
158
- def __init__(self, model_type):
159
- super().__init__()
160
- assert (model_type in self.MODEL_TYPES_ISL)
161
- model, _ = load_model(model_type)
162
- self.model = model
163
- self.model.train = disabled_train
164
-
165
- def forward(self, x):
166
- with torch.no_grad():
167
- prediction = self.model(x)
168
- return prediction
169
-
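A minimal usage sketch, assuming the input has already been preprocessed by the transform returned from load_midas_transform("dpt_hybrid"); the random tensor stands in for a real image.

import torch

midas = MiDaSInference(model_type="dpt_hybrid")  # downloads the checkpoint if missing
x = torch.randn(1, 3, 384, 384)                  # stand-in for a normalized [B, 3, H, W] image
depth = midas(x)                                 # relative inverse-depth prediction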
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py DELETED
@@ -1,62 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import math
3
-
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- from .registry import CONV_LAYERS
8
-
9
-
10
- @CONV_LAYERS.register_module()
11
- class Conv2dAdaptivePadding(nn.Conv2d):
12
- """Implementation of 2D convolution in tensorflow with `padding` as "same",
13
- which applies padding to input (if needed) so that input image gets fully
14
- covered by filter and stride you specified. For stride 1, this will ensure
15
- that output image size is same as input. For stride of 2, output dimensions
16
- will be half, for example.
17
-
18
- Args:
19
- in_channels (int): Number of channels in the input image
20
- out_channels (int): Number of channels produced by the convolution
21
- kernel_size (int or tuple): Size of the convolving kernel
22
- stride (int or tuple, optional): Stride of the convolution. Default: 1
23
- padding (int or tuple, optional): Zero-padding added to both sides of
24
- the input. Default: 0
25
- dilation (int or tuple, optional): Spacing between kernel elements.
26
- Default: 1
27
- groups (int, optional): Number of blocked connections from input
28
- channels to output channels. Default: 1
29
- bias (bool, optional): If ``True``, adds a learnable bias to the
30
- output. Default: ``True``
31
- """
32
-
33
- def __init__(self,
34
- in_channels,
35
- out_channels,
36
- kernel_size,
37
- stride=1,
38
- padding=0,
39
- dilation=1,
40
- groups=1,
41
- bias=True):
42
- super().__init__(in_channels, out_channels, kernel_size, stride, 0,
43
- dilation, groups, bias)
44
-
45
- def forward(self, x):
46
- img_h, img_w = x.size()[-2:]
47
- kernel_h, kernel_w = self.weight.size()[-2:]
48
- stride_h, stride_w = self.stride
49
- output_h = math.ceil(img_h / stride_h)
50
- output_w = math.ceil(img_w / stride_w)
51
- pad_h = (
52
- max((output_h - 1) * self.stride[0] +
53
- (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0))
54
- pad_w = (
55
- max((output_w - 1) * self.stride[1] +
56
- (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0))
57
- if pad_h > 0 or pad_w > 0:
58
- x = F.pad(x, [
59
- pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
60
- ])
61
- return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
62
- self.dilation, self.groups)
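A quick sketch of the "same"-padding guarantee stated in the docstring above: the output spatial size is ceil(input / stride) for any input size.

import math
import torch

conv = Conv2dAdaptivePadding(3, 8, kernel_size=3, stride=2)
y = conv(torch.randn(1, 3, 31, 45))
assert y.shape[-2:] == (math.ceil(31 / 2), math.ceil(45 / 2))  # (16, 23)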
 
spaces/AsakuraMizu/moe-tts/models.py DELETED
@@ -1,549 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- import commons
7
- import modules
8
- import attentions
9
- import monotonic_align
10
-
11
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
- from commons import init_weights, get_padding
14
-
15
-
16
- class StochasticDurationPredictor(nn.Module):
17
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
18
- super().__init__()
19
- filter_channels = in_channels # it needs to be removed from future version.
20
- self.in_channels = in_channels
21
- self.filter_channels = filter_channels
22
- self.kernel_size = kernel_size
23
- self.p_dropout = p_dropout
24
- self.n_flows = n_flows
25
- self.gin_channels = gin_channels
26
-
27
- self.log_flow = modules.Log()
28
- self.flows = nn.ModuleList()
29
- self.flows.append(modules.ElementwiseAffine(2))
30
- for i in range(n_flows):
31
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
32
- self.flows.append(modules.Flip())
33
-
34
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
35
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
36
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
37
- self.post_flows = nn.ModuleList()
38
- self.post_flows.append(modules.ElementwiseAffine(2))
39
- for i in range(4):
40
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
41
- self.post_flows.append(modules.Flip())
42
-
43
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
44
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
45
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
46
- if gin_channels != 0:
47
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
48
-
49
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
50
- x = torch.detach(x)
51
- x = self.pre(x)
52
- if g is not None:
53
- g = torch.detach(g)
54
- x = x + self.cond(g)
55
- x = self.convs(x, x_mask)
56
- x = self.proj(x) * x_mask
57
-
58
- if not reverse:
59
- flows = self.flows
60
- assert w is not None
61
-
62
- logdet_tot_q = 0
63
- h_w = self.post_pre(w)
64
- h_w = self.post_convs(h_w, x_mask)
65
- h_w = self.post_proj(h_w) * x_mask
66
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
67
- z_q = e_q
68
- for flow in self.post_flows:
69
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
70
- logdet_tot_q += logdet_q
71
- z_u, z1 = torch.split(z_q, [1, 1], 1)
72
- u = torch.sigmoid(z_u) * x_mask
73
- z0 = (w - u) * x_mask
74
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
75
- logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
76
-
77
- logdet_tot = 0
78
- z0, logdet = self.log_flow(z0, x_mask)
79
- logdet_tot += logdet
80
- z = torch.cat([z0, z1], 1)
81
- for flow in flows:
82
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
83
- logdet_tot = logdet_tot + logdet
84
- nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
85
- return nll + logq # [b]
86
- else:
87
- flows = list(reversed(self.flows))
88
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
89
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
90
- for flow in flows:
91
- z = flow(z, x_mask, g=x, reverse=reverse)
92
- z0, z1 = torch.split(z, [1, 1], 1)
93
- logw = z0
94
- return logw
95
-
96
-
97
- class DurationPredictor(nn.Module):
98
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
99
- super().__init__()
100
-
101
- self.in_channels = in_channels
102
- self.filter_channels = filter_channels
103
- self.kernel_size = kernel_size
104
- self.p_dropout = p_dropout
105
- self.gin_channels = gin_channels
106
-
107
- self.drop = nn.Dropout(p_dropout)
108
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
109
- self.norm_1 = modules.LayerNorm(filter_channels)
110
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
111
- self.norm_2 = modules.LayerNorm(filter_channels)
112
- self.proj = nn.Conv1d(filter_channels, 1, 1)
113
-
114
- if gin_channels != 0:
115
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
116
-
117
- def forward(self, x, x_mask, g=None):
118
- x = torch.detach(x)
119
- if g is not None:
120
- g = torch.detach(g)
121
- x = x + self.cond(g)
122
- x = self.conv_1(x * x_mask)
123
- x = torch.relu(x)
124
- x = self.norm_1(x)
125
- x = self.drop(x)
126
- x = self.conv_2(x * x_mask)
127
- x = torch.relu(x)
128
- x = self.norm_2(x)
129
- x = self.drop(x)
130
- x = self.proj(x * x_mask)
131
- return x * x_mask
132
-
133
-
134
- class TextEncoder(nn.Module):
135
- def __init__(self,
136
- n_vocab,
137
- out_channels,
138
- hidden_channels,
139
- filter_channels,
140
- n_heads,
141
- n_layers,
142
- kernel_size,
143
- p_dropout,
144
- emotion_embedding):
145
- super().__init__()
146
- self.n_vocab = n_vocab
147
- self.out_channels = out_channels
148
- self.hidden_channels = hidden_channels
149
- self.filter_channels = filter_channels
150
- self.n_heads = n_heads
151
- self.n_layers = n_layers
152
- self.kernel_size = kernel_size
153
- self.p_dropout = p_dropout
154
- self.emotion_embedding = emotion_embedding
155
-
156
- if self.n_vocab != 0:
157
- self.emb = nn.Embedding(n_vocab, hidden_channels)
158
- if emotion_embedding:
159
- self.emo_proj = nn.Linear(1024, hidden_channels)
160
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
161
-
162
- self.encoder = attentions.Encoder(
163
- hidden_channels,
164
- filter_channels,
165
- n_heads,
166
- n_layers,
167
- kernel_size,
168
- p_dropout)
169
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
170
-
171
- def forward(self, x, x_lengths, emotion_embedding=None):
172
- if self.n_vocab != 0:
173
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
174
- if emotion_embedding is not None:
175
- x = x + self.emo_proj(emotion_embedding.unsqueeze(1))
176
- x = torch.transpose(x, 1, -1) # [b, h, t]
177
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
178
-
179
- x = self.encoder(x * x_mask, x_mask)
180
- stats = self.proj(x) * x_mask
181
-
182
- m, logs = torch.split(stats, self.out_channels, dim=1)
183
- return x, m, logs, x_mask
184
-
185
-
186
- class ResidualCouplingBlock(nn.Module):
187
- def __init__(self,
188
- channels,
189
- hidden_channels,
190
- kernel_size,
191
- dilation_rate,
192
- n_layers,
193
- n_flows=4,
194
- gin_channels=0):
195
- super().__init__()
196
- self.channels = channels
197
- self.hidden_channels = hidden_channels
198
- self.kernel_size = kernel_size
199
- self.dilation_rate = dilation_rate
200
- self.n_layers = n_layers
201
- self.n_flows = n_flows
202
- self.gin_channels = gin_channels
203
-
204
- self.flows = nn.ModuleList()
205
- for i in range(n_flows):
206
- self.flows.append(
207
- modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
208
- gin_channels=gin_channels, mean_only=True))
209
- self.flows.append(modules.Flip())
210
-
211
- def forward(self, x, x_mask, g=None, reverse=False):
212
- if not reverse:
213
- for flow in self.flows:
214
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
215
- else:
216
- for flow in reversed(self.flows):
217
- x = flow(x, x_mask, g=g, reverse=reverse)
218
- return x
219
-
220
-
221
- class PosteriorEncoder(nn.Module):
222
- def __init__(self,
223
- in_channels,
224
- out_channels,
225
- hidden_channels,
226
- kernel_size,
227
- dilation_rate,
228
- n_layers,
229
- gin_channels=0):
230
- super().__init__()
231
- self.in_channels = in_channels
232
- self.out_channels = out_channels
233
- self.hidden_channels = hidden_channels
234
- self.kernel_size = kernel_size
235
- self.dilation_rate = dilation_rate
236
- self.n_layers = n_layers
237
- self.gin_channels = gin_channels
238
-
239
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
240
-         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
-         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-     def forward(self, x, x_lengths, g=None):
-         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-         x = self.pre(x) * x_mask
-         x = self.enc(x, x_mask, g=g)
-         stats = self.proj(x) * x_mask
-         m, logs = torch.split(stats, self.out_channels, dim=1)
-         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-         return z, m, logs, x_mask
-
-
- class Generator(torch.nn.Module):
-     def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
-                  upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
-         super(Generator, self).__init__()
-         self.num_kernels = len(resblock_kernel_sizes)
-         self.num_upsamples = len(upsample_rates)
-         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
-         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
-         self.ups = nn.ModuleList()
-         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-             self.ups.append(weight_norm(
-                 ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
-                                 k, u, padding=(k - u) // 2)))
-
-         self.resblocks = nn.ModuleList()
-         for i in range(len(self.ups)):
-             ch = upsample_initial_channel // (2 ** (i + 1))
-             for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
-                 self.resblocks.append(resblock(ch, k, d))
-
-         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-         self.ups.apply(init_weights)
-
-         if gin_channels != 0:
-             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-     def forward(self, x, g=None):
-         x = self.conv_pre(x)
-         if g is not None:
-             x = x + self.cond(g)
-
-         for i in range(self.num_upsamples):
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             x = self.ups[i](x)
-             xs = None
-             for j in range(self.num_kernels):
-                 if xs is None:
-                     xs = self.resblocks[i * self.num_kernels + j](x)
-                 else:
-                     xs += self.resblocks[i * self.num_kernels + j](x)
-             x = xs / self.num_kernels
-         x = F.leaky_relu(x)
-         x = self.conv_post(x)
-         x = torch.tanh(x)
-
-         return x
-
-     def remove_weight_norm(self):
-         print('Removing weight norm...')
-         for l in self.ups:
-             remove_weight_norm(l)
-         for l in self.resblocks:
-             l.remove_weight_norm()
-
-
- class DiscriminatorP(torch.nn.Module):
-     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-         super(DiscriminatorP, self).__init__()
-         self.period = period
-         self.use_spectral_norm = use_spectral_norm
-         norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-         self.convs = nn.ModuleList([
-             norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
-         ])
-         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-     def forward(self, x):
-         fmap = []
-
-         # 1d to 2d
-         b, c, t = x.shape
-         if t % self.period != 0:  # pad first
-             n_pad = self.period - (t % self.period)
-             x = F.pad(x, (0, n_pad), "reflect")
-             t = t + n_pad
-         x = x.view(b, c, t // self.period, self.period)
-
-         for l in self.convs:
-             x = l(x)
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             fmap.append(x)
-         x = self.conv_post(x)
-         fmap.append(x)
-         x = torch.flatten(x, 1, -1)
-
-         return x, fmap
-
-
- class DiscriminatorS(torch.nn.Module):
-     def __init__(self, use_spectral_norm=False):
-         super(DiscriminatorS, self).__init__()
-         norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-         self.convs = nn.ModuleList([
-             norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-             norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-             norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-             norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-             norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-             norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-         ])
-         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-     def forward(self, x):
-         fmap = []
-
-         for l in self.convs:
-             x = l(x)
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             fmap.append(x)
-         x = self.conv_post(x)
-         fmap.append(x)
-         x = torch.flatten(x, 1, -1)
-
-         return x, fmap
-
-
- class MultiPeriodDiscriminator(torch.nn.Module):
-     def __init__(self, use_spectral_norm=False):
-         super(MultiPeriodDiscriminator, self).__init__()
-         periods = [2, 3, 5, 7, 11]
-
-         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-         discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
-         self.discriminators = nn.ModuleList(discs)
-
-     def forward(self, y, y_hat):
-         y_d_rs = []
-         y_d_gs = []
-         fmap_rs = []
-         fmap_gs = []
-         for i, d in enumerate(self.discriminators):
-             y_d_r, fmap_r = d(y)
-             y_d_g, fmap_g = d(y_hat)
-             y_d_rs.append(y_d_r)
-             y_d_gs.append(y_d_g)
-             fmap_rs.append(fmap_r)
-             fmap_gs.append(fmap_g)
-
-         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
- class SynthesizerTrn(nn.Module):
-     """
-     Synthesizer for Training
-     """
-
-     def __init__(self,
-                  n_vocab,
-                  spec_channels,
-                  segment_size,
-                  inter_channels,
-                  hidden_channels,
-                  filter_channels,
-                  n_heads,
-                  n_layers,
-                  kernel_size,
-                  p_dropout,
-                  resblock,
-                  resblock_kernel_sizes,
-                  resblock_dilation_sizes,
-                  upsample_rates,
-                  upsample_initial_channel,
-                  upsample_kernel_sizes,
-                  n_speakers=0,
-                  gin_channels=0,
-                  use_sdp=True,
-                  emotion_embedding=False,
-                  **kwargs):
-
-         super().__init__()
-         self.n_vocab = n_vocab
-         self.spec_channels = spec_channels
-         self.inter_channels = inter_channels
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.resblock = resblock
-         self.resblock_kernel_sizes = resblock_kernel_sizes
-         self.resblock_dilation_sizes = resblock_dilation_sizes
-         self.upsample_rates = upsample_rates
-         self.upsample_initial_channel = upsample_initial_channel
-         self.upsample_kernel_sizes = upsample_kernel_sizes
-         self.segment_size = segment_size
-         self.n_speakers = n_speakers
-         self.gin_channels = gin_channels
-
-         self.use_sdp = use_sdp
-
-         self.enc_p = TextEncoder(n_vocab,
-                                  inter_channels,
-                                  hidden_channels,
-                                  filter_channels,
-                                  n_heads,
-                                  n_layers,
-                                  kernel_size,
-                                  p_dropout,
-                                  emotion_embedding)
-         self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
-                              upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
-         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
-                                       gin_channels=gin_channels)
-         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
-         if use_sdp:
-             self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
-         else:
-             self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
-         if n_speakers > 1:
-             self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
-     def forward(self, x, x_lengths, y, y_lengths, sid=None, emotion_embedding=None):
-
-         x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
-         if self.n_speakers > 1:
-             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
-         else:
-             g = None
-
-         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-         z_p = self.flow(z, y_mask, g=g)
-
-         with torch.no_grad():
-             # negative cross-entropy
-             s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
-             neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
-             neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
-                                      s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-             neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-             neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
-             neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
-             attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-             attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
-         w = attn.sum(2)
-         if self.use_sdp:
-             l_length = self.dp(x, x_mask, w, g=g)
-             l_length = l_length / torch.sum(x_mask)
-         else:
-             logw_ = torch.log(w + 1e-6) * x_mask
-             logw = self.dp(x, x_mask, g=g)
-             l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask)  # for averaging
-
-         # expand prior
-         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
-         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
-         z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
-         o = self.dec(z_slice, g=g)
-         return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-     def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None,
-               emotion_embedding=None):
-         x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
-         if self.n_speakers > 1:
-             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
-         else:
-             g = None
-
-         if self.use_sdp:
-             logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
-         else:
-             logw = self.dp(x, x_mask, g=g)
-         w = torch.exp(logw) * x_mask * length_scale
-         w_ceil = torch.ceil(w)
-         y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
-         y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
-         attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-         attn = commons.generate_path(w_ceil, attn_mask)
-
-         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
-         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
-
-         z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-         z = self.flow(z_p, y_mask, g=g, reverse=True)
-         o = self.dec((z * y_mask)[:, :, :max_len], g=g)
-         return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
-     def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
-         assert self.n_speakers > 1, "n_speakers have to be larger than 1."
-         g_src = self.emb_g(sid_src).unsqueeze(-1)
-         g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
-         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
-         z_p = self.flow(z, y_mask, g=g_src)
-         z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
-         o_hat = self.dec(z_hat * y_mask, g=g_tgt)
-         return o_hat, y_mask, (z, z_p, z_hat)
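
For context, here is a minimal inference sketch against the SynthesizerTrn class deleted above. Every hyperparameter value is an illustrative assumption (typical VITS-style settings), not something taken from this repository, and the module's own dependencies (TextEncoder, modules, commons, monotonic_align) must be importable for it to run.

    import torch

    # Assumed configuration values; real models load these from a JSON config
    # and restore trained weights from a checkpoint before calling infer().
    net_g = SynthesizerTrn(
        n_vocab=100,                        # assumed symbol-table size
        spec_channels=513, segment_size=32,
        inter_channels=192, hidden_channels=192, filter_channels=768,
        n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
        resblock='1',
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    ).eval()

    x = torch.randint(0, 100, (1, 50))          # dummy phoneme ids
    x_lengths = torch.LongTensor([x.size(1)])
    with torch.no_grad():
        # Returns the waveform, the alignment, the output mask, and latents.
        audio, attn, y_mask, _ = net_g.infer(
            x, x_lengths, noise_scale=0.667, length_scale=1.0, noise_scale_w=0.8)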
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_legacy.py DELETED
@@ -1,102 +0,0 @@
- import logging
- import os.path
- from typing import List, Optional
-
- from pip._internal.cli.spinners import open_spinner
- from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args
- from pip._internal.utils.subprocess import call_subprocess, format_command_args
-
- logger = logging.getLogger(__name__)
-
-
- def format_command_result(
-     command_args: List[str],
-     command_output: str,
- ) -> str:
-     """Format command information for logging."""
-     command_desc = format_command_args(command_args)
-     text = f"Command arguments: {command_desc}\n"
-
-     if not command_output:
-         text += "Command output: None"
-     elif logger.getEffectiveLevel() > logging.DEBUG:
-         text += "Command output: [use --verbose to show]"
-     else:
-         if not command_output.endswith("\n"):
-             command_output += "\n"
-         text += f"Command output:\n{command_output}"
-
-     return text
-
-
- def get_legacy_build_wheel_path(
-     names: List[str],
-     temp_dir: str,
-     name: str,
-     command_args: List[str],
-     command_output: str,
- ) -> Optional[str]:
-     """Return the path to the wheel in the temporary build directory."""
-     # Sort for determinism.
-     names = sorted(names)
-     if not names:
-         msg = ("Legacy build of wheel for {!r} created no files.\n").format(name)
-         msg += format_command_result(command_args, command_output)
-         logger.warning(msg)
-         return None
-
-     if len(names) > 1:
-         msg = (
-             "Legacy build of wheel for {!r} created more than one file.\n"
-             "Filenames (choosing first): {}\n"
-         ).format(name, names)
-         msg += format_command_result(command_args, command_output)
-         logger.warning(msg)
-
-     return os.path.join(temp_dir, names[0])
-
-
- def build_wheel_legacy(
-     name: str,
-     setup_py_path: str,
-     source_dir: str,
-     global_options: List[str],
-     build_options: List[str],
-     tempd: str,
- ) -> Optional[str]:
-     """Build one unpacked package using the "legacy" build process.
-
-     Returns path to wheel if successfully built. Otherwise, returns None.
-     """
-     wheel_args = make_setuptools_bdist_wheel_args(
-         setup_py_path,
-         global_options=global_options,
-         build_options=build_options,
-         destination_dir=tempd,
-     )
-
-     spin_message = f"Building wheel for {name} (setup.py)"
-     with open_spinner(spin_message) as spinner:
-         logger.debug("Destination directory: %s", tempd)
-
-         try:
-             output = call_subprocess(
-                 wheel_args,
-                 command_desc="python setup.py bdist_wheel",
-                 cwd=source_dir,
-                 spinner=spinner,
-             )
-         except Exception:
-             spinner.finish("error")
-             logger.error("Failed building wheel for %s", name)
-             return None
-
-         names = os.listdir(tempd)
-         wheel_path = get_legacy_build_wheel_path(
-             names=names,
-             temp_dir=tempd,
-             name=name,
-             command_args=wheel_args,
-             command_output=output,
-         )
-         return wheel_path
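
A brief usage sketch of the deleted helper above. Note this is pip-internal API and the project name and paths are hypothetical; real code should not depend on pip internals.

    import tempfile
    from pip._internal.operations.build.wheel_legacy import build_wheel_legacy

    with tempfile.TemporaryDirectory() as tempd:
        # Runs "python setup.py bdist_wheel" in source_dir, collecting output
        # into tempd; returns the wheel path or None on failure.
        wheel = build_wheel_legacy(
            name="example-pkg",                    # hypothetical project name
            setup_py_path="/src/example/setup.py", # hypothetical path
            source_dir="/src/example",
            global_options=[],
            build_options=[],
            tempd=tempd,
        )
        print(wheel)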
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/core.py DELETED
@@ -1,400 +0,0 @@
- from . import idnadata
- import bisect
- import unicodedata
- import re
- from typing import Union, Optional
- from .intranges import intranges_contain
-
- _virama_combining_class = 9
- _alabel_prefix = b'xn--'
- _unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
-
- class IDNAError(UnicodeError):
-     """ Base exception for all IDNA-encoding related problems """
-     pass
-
-
- class IDNABidiError(IDNAError):
-     """ Exception when bidirectional requirements are not satisfied """
-     pass
-
-
- class InvalidCodepoint(IDNAError):
-     """ Exception when a disallowed or unallocated codepoint is used """
-     pass
-
-
- class InvalidCodepointContext(IDNAError):
-     """ Exception when the codepoint is not valid in the context it is used """
-     pass
-
-
- def _combining_class(cp: int) -> int:
-     v = unicodedata.combining(chr(cp))
-     if v == 0:
-         if not unicodedata.name(chr(cp)):
-             raise ValueError('Unknown character in unicodedata')
-     return v
-
- def _is_script(cp: str, script: str) -> bool:
-     return intranges_contain(ord(cp), idnadata.scripts[script])
-
- def _punycode(s: str) -> bytes:
-     return s.encode('punycode')
-
- def _unot(s: int) -> str:
-     return 'U+{:04X}'.format(s)
-
-
- def valid_label_length(label: Union[bytes, str]) -> bool:
-     if len(label) > 63:
-         return False
-     return True
-
-
- def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
-     if len(label) > (254 if trailing_dot else 253):
-         return False
-     return True
-
-
- def check_bidi(label: str, check_ltr: bool = False) -> bool:
-     # Bidi rules should only be applied if string contains RTL characters
-     bidi_label = False
-     for (idx, cp) in enumerate(label, 1):
-         direction = unicodedata.bidirectional(cp)
-         if direction == '':
-             # String likely comes from a newer version of Unicode
-             raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
-         if direction in ['R', 'AL', 'AN']:
-             bidi_label = True
-     if not bidi_label and not check_ltr:
-         return True
-
-     # Bidi rule 1
-     direction = unicodedata.bidirectional(label[0])
-     if direction in ['R', 'AL']:
-         rtl = True
-     elif direction == 'L':
-         rtl = False
-     else:
-         raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
-
-     valid_ending = False
-     number_type = None  # type: Optional[str]
-     for (idx, cp) in enumerate(label, 1):
-         direction = unicodedata.bidirectional(cp)
-
-         if rtl:
-             # Bidi rule 2
-             if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
-                 raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
-             # Bidi rule 3
-             if direction in ['R', 'AL', 'EN', 'AN']:
-                 valid_ending = True
-             elif direction != 'NSM':
-                 valid_ending = False
-             # Bidi rule 4
-             if direction in ['AN', 'EN']:
-                 if not number_type:
-                     number_type = direction
-                 else:
-                     if number_type != direction:
-                         raise IDNABidiError('Can not mix numeral types in a right-to-left label')
-         else:
-             # Bidi rule 5
-             if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
-                 raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
-             # Bidi rule 6
-             if direction in ['L', 'EN']:
-                 valid_ending = True
-             elif direction != 'NSM':
-                 valid_ending = False
-
-     if not valid_ending:
-         raise IDNABidiError('Label ends with illegal codepoint directionality')
-
-     return True
-
-
- def check_initial_combiner(label: str) -> bool:
-     if unicodedata.category(label[0])[0] == 'M':
-         raise IDNAError('Label begins with an illegal combining character')
-     return True
-
-
- def check_hyphen_ok(label: str) -> bool:
-     if label[2:4] == '--':
-         raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
-     if label[0] == '-' or label[-1] == '-':
-         raise IDNAError('Label must not start or end with a hyphen')
-     return True
-
-
- def check_nfc(label: str) -> None:
-     if unicodedata.normalize('NFC', label) != label:
-         raise IDNAError('Label must be in Normalization Form C')
-
-
- def valid_contextj(label: str, pos: int) -> bool:
-     cp_value = ord(label[pos])
-
-     if cp_value == 0x200c:
-
-         if pos > 0:
-             if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
-                 return True
-
-         ok = False
-         for i in range(pos-1, -1, -1):
-             joining_type = idnadata.joining_types.get(ord(label[i]))
-             if joining_type == ord('T'):
-                 continue
-             if joining_type in [ord('L'), ord('D')]:
-                 ok = True
-                 break
-
-         if not ok:
-             return False
-
-         ok = False
-         for i in range(pos+1, len(label)):
-             joining_type = idnadata.joining_types.get(ord(label[i]))
-             if joining_type == ord('T'):
-                 continue
-             if joining_type in [ord('R'), ord('D')]:
-                 ok = True
-                 break
-         return ok
-
-     if cp_value == 0x200d:
-
-         if pos > 0:
-             if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
-                 return True
-         return False
-
-     else:
-
-         return False
-
-
- def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
-     cp_value = ord(label[pos])
-
-     if cp_value == 0x00b7:
-         if 0 < pos < len(label)-1:
-             if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
-                 return True
-         return False
-
-     elif cp_value == 0x0375:
-         if pos < len(label)-1 and len(label) > 1:
-             return _is_script(label[pos + 1], 'Greek')
-         return False
-
-     elif cp_value == 0x05f3 or cp_value == 0x05f4:
-         if pos > 0:
-             return _is_script(label[pos - 1], 'Hebrew')
-         return False
-
-     elif cp_value == 0x30fb:
-         for cp in label:
-             if cp == '\u30fb':
-                 continue
-             if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
-                 return True
-         return False
-
-     elif 0x660 <= cp_value <= 0x669:
-         for cp in label:
-             if 0x6f0 <= ord(cp) <= 0x06f9:
-                 return False
-         return True
-
-     elif 0x6f0 <= cp_value <= 0x6f9:
-         for cp in label:
-             if 0x660 <= ord(cp) <= 0x0669:
-                 return False
-         return True
-
-     return False
-
-
- def check_label(label: Union[str, bytes, bytearray]) -> None:
-     if isinstance(label, (bytes, bytearray)):
-         label = label.decode('utf-8')
-     if len(label) == 0:
-         raise IDNAError('Empty Label')
-
-     check_nfc(label)
-     check_hyphen_ok(label)
-     check_initial_combiner(label)
-
-     for (pos, cp) in enumerate(label):
-         cp_value = ord(cp)
-         if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
-             continue
-         elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
-             try:
-                 if not valid_contextj(label, pos):
-                     raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
-                         _unot(cp_value), pos+1, repr(label)))
-             except ValueError:
-                 raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
-                     _unot(cp_value), pos+1, repr(label)))
-         elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
-             if not valid_contexto(label, pos):
-                 raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
-         else:
-             raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
-
-     check_bidi(label)
-
-
- def alabel(label: str) -> bytes:
-     try:
-         label_bytes = label.encode('ascii')
-         ulabel(label_bytes)
-         if not valid_label_length(label_bytes):
-             raise IDNAError('Label too long')
-         return label_bytes
-     except UnicodeEncodeError:
-         pass
-
-     if not label:
-         raise IDNAError('No Input')
-
-     label = str(label)
-     check_label(label)
-     label_bytes = _punycode(label)
-     label_bytes = _alabel_prefix + label_bytes
-
-     if not valid_label_length(label_bytes):
-         raise IDNAError('Label too long')
-
-     return label_bytes
-
-
- def ulabel(label: Union[str, bytes, bytearray]) -> str:
-     if not isinstance(label, (bytes, bytearray)):
-         try:
-             label_bytes = label.encode('ascii')
-         except UnicodeEncodeError:
-             check_label(label)
-             return label
-     else:
-         label_bytes = label
-
-     label_bytes = label_bytes.lower()
-     if label_bytes.startswith(_alabel_prefix):
-         label_bytes = label_bytes[len(_alabel_prefix):]
-         if not label_bytes:
-             raise IDNAError('Malformed A-label, no Punycode eligible content found')
-         if label_bytes.decode('ascii')[-1] == '-':
-             raise IDNAError('A-label must not end with a hyphen')
-     else:
-         check_label(label_bytes)
-         return label_bytes.decode('ascii')
-
-     try:
-         label = label_bytes.decode('punycode')
-     except UnicodeError:
-         raise IDNAError('Invalid A-label')
-     check_label(label)
-     return label
-
-
- def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
-     """Re-map the characters in the string according to UTS46 processing."""
-     from .uts46data import uts46data
-     output = ''
-
-     for pos, char in enumerate(domain):
-         code_point = ord(char)
-         try:
-             uts46row = uts46data[code_point if code_point < 256 else
-                 bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
-             status = uts46row[1]
-             replacement = None  # type: Optional[str]
-             if len(uts46row) == 3:
-                 replacement = uts46row[2]  # type: ignore
-             if (status == 'V' or
-                     (status == 'D' and not transitional) or
-                     (status == '3' and not std3_rules and replacement is None)):
-                 output += char
-             elif replacement is not None and (status == 'M' or
-                     (status == '3' and not std3_rules) or
-                     (status == 'D' and transitional)):
-                 output += replacement
-             elif status != 'I':
-                 raise IndexError()
-         except IndexError:
-             raise InvalidCodepoint(
-                 'Codepoint {} not allowed at position {} in {}'.format(
-                     _unot(code_point), pos + 1, repr(domain)))
-
-     return unicodedata.normalize('NFC', output)
-
-
- def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
-     if isinstance(s, (bytes, bytearray)):
-         try:
-             s = s.decode('ascii')
-         except UnicodeDecodeError:
-             raise IDNAError('should pass a unicode string to the function rather than a byte string.')
-     if uts46:
-         s = uts46_remap(s, std3_rules, transitional)
-     trailing_dot = False
-     result = []
-     if strict:
-         labels = s.split('.')
-     else:
-         labels = _unicode_dots_re.split(s)
-     if not labels or labels == ['']:
-         raise IDNAError('Empty domain')
-     if labels[-1] == '':
-         del labels[-1]
-         trailing_dot = True
-     for label in labels:
-         s = alabel(label)
-         if s:
-             result.append(s)
-         else:
-             raise IDNAError('Empty label')
-     if trailing_dot:
-         result.append(b'')
-     s = b'.'.join(result)
-     if not valid_string_length(s, trailing_dot):
-         raise IDNAError('Domain too long')
-     return s
-
-
- def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
-     try:
-         if isinstance(s, (bytes, bytearray)):
-             s = s.decode('ascii')
-     except UnicodeDecodeError:
-         raise IDNAError('Invalid ASCII in A-label')
-     if uts46:
-         s = uts46_remap(s, std3_rules, False)
-     trailing_dot = False
-     result = []
-     if not strict:
-         labels = _unicode_dots_re.split(s)
-     else:
-         labels = s.split('.')
-     if not labels or labels == ['']:
-         raise IDNAError('Empty domain')
-     if not labels[-1]:
-         del labels[-1]
-         trailing_dot = True
-     for label in labels:
-         s = ulabel(label)
-         if s:
-             result.append(s)
-         else:
-             raise IDNAError('Empty label')
-     if trailing_dot:
-         result.append('')
-     return '.'.join(result)
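
A quick round-trip with the module's public entry points, which the vendored idna package re-exports. The domain is the example used in the idna library's own documentation.

    from pip._vendor import idna

    # uts46=True applies UTS46 case folding before IDNA2008 validation.
    alabel = idna.encode('Königsgäßchen', uts46=True)
    print(alabel)               # b'xn--knigsgchen-b4a3dun'
    print(idna.decode(alabel))  # 'königsgäßchen'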
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/ansi.py DELETED
@@ -1,240 +0,0 @@
- import re
- import sys
- from contextlib import suppress
- from typing import Iterable, NamedTuple, Optional
-
- from .color import Color
- from .style import Style
- from .text import Text
-
- re_ansi = re.compile(
-     r"""
-     (?:\x1b\](.*?)\x1b\\)|
-     (?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~]))
-     """,
-     re.VERBOSE,
- )
-
-
- class _AnsiToken(NamedTuple):
-     """Result of ansi tokenized string."""
-
-     plain: str = ""
-     sgr: Optional[str] = ""
-     osc: Optional[str] = ""
-
-
- def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
-     """Tokenize a string in to plain text and ANSI codes.
-
-     Args:
-         ansi_text (str): A String containing ANSI codes.
-
-     Yields:
-         AnsiToken: A named tuple of (plain, sgr, osc)
-     """
-
-     position = 0
-     sgr: Optional[str]
-     osc: Optional[str]
-     for match in re_ansi.finditer(ansi_text):
-         start, end = match.span(0)
-         osc, sgr = match.groups()
-         if start > position:
-             yield _AnsiToken(ansi_text[position:start])
-         if sgr:
-             if sgr == "(":
-                 position = end + 1
-                 continue
-             if sgr.endswith("m"):
-                 yield _AnsiToken("", sgr[1:-1], osc)
-         else:
-             yield _AnsiToken("", sgr, osc)
-         position = end
-     if position < len(ansi_text):
-         yield _AnsiToken(ansi_text[position:])
-
-
- SGR_STYLE_MAP = {
-     1: "bold",
-     2: "dim",
-     3: "italic",
-     4: "underline",
-     5: "blink",
-     6: "blink2",
-     7: "reverse",
-     8: "conceal",
-     9: "strike",
-     21: "underline2",
-     22: "not dim not bold",
-     23: "not italic",
-     24: "not underline",
-     25: "not blink",
-     26: "not blink2",
-     27: "not reverse",
-     28: "not conceal",
-     29: "not strike",
-     30: "color(0)",
-     31: "color(1)",
-     32: "color(2)",
-     33: "color(3)",
-     34: "color(4)",
-     35: "color(5)",
-     36: "color(6)",
-     37: "color(7)",
-     39: "default",
-     40: "on color(0)",
-     41: "on color(1)",
-     42: "on color(2)",
-     43: "on color(3)",
-     44: "on color(4)",
-     45: "on color(5)",
-     46: "on color(6)",
-     47: "on color(7)",
-     49: "on default",
-     51: "frame",
-     52: "encircle",
-     53: "overline",
-     54: "not frame not encircle",
-     55: "not overline",
-     90: "color(8)",
-     91: "color(9)",
-     92: "color(10)",
-     93: "color(11)",
-     94: "color(12)",
-     95: "color(13)",
-     96: "color(14)",
-     97: "color(15)",
-     100: "on color(8)",
-     101: "on color(9)",
-     102: "on color(10)",
-     103: "on color(11)",
-     104: "on color(12)",
-     105: "on color(13)",
-     106: "on color(14)",
-     107: "on color(15)",
- }
-
-
- class AnsiDecoder:
-     """Translate ANSI code in to styled Text."""
-
-     def __init__(self) -> None:
-         self.style = Style.null()
-
-     def decode(self, terminal_text: str) -> Iterable[Text]:
-         """Decode ANSI codes in an iterable of lines.
-
-         Args:
-             lines (Iterable[str]): An iterable of lines of terminal output.
-
-         Yields:
-             Text: Marked up Text.
-         """
-         for line in terminal_text.splitlines():
-             yield self.decode_line(line)
-
-     def decode_line(self, line: str) -> Text:
-         """Decode a line containing ansi codes.
-
-         Args:
-             line (str): A line of terminal output.
-
-         Returns:
-             Text: A Text instance marked up according to ansi codes.
-         """
-         from_ansi = Color.from_ansi
-         from_rgb = Color.from_rgb
-         _Style = Style
-         text = Text()
-         append = text.append
-         line = line.rsplit("\r", 1)[-1]
-         for plain_text, sgr, osc in _ansi_tokenize(line):
-             if plain_text:
-                 append(plain_text, self.style or None)
-             elif osc is not None:
-                 if osc.startswith("8;"):
-                     _params, semicolon, link = osc[2:].partition(";")
-                     if semicolon:
-                         self.style = self.style.update_link(link or None)
-             elif sgr is not None:
-                 # Translate in to semi-colon separated codes
-                 # Ignore invalid codes, because we want to be lenient
-                 codes = [
-                     min(255, int(_code) if _code else 0)
-                     for _code in sgr.split(";")
-                     if _code.isdigit() or _code == ""
-                 ]
-                 iter_codes = iter(codes)
-                 for code in iter_codes:
-                     if code == 0:
-                         # reset
-                         self.style = _Style.null()
-                     elif code in SGR_STYLE_MAP:
-                         # styles
-                         self.style += _Style.parse(SGR_STYLE_MAP[code])
-                     elif code == 38:
-                         # Foreground
-                         with suppress(StopIteration):
-                             color_type = next(iter_codes)
-                             if color_type == 5:
-                                 self.style += _Style.from_color(
-                                     from_ansi(next(iter_codes))
-                                 )
-                             elif color_type == 2:
-                                 self.style += _Style.from_color(
-                                     from_rgb(
-                                         next(iter_codes),
-                                         next(iter_codes),
-                                         next(iter_codes),
-                                     )
-                                 )
-                     elif code == 48:
-                         # Background
-                         with suppress(StopIteration):
-                             color_type = next(iter_codes)
-                             if color_type == 5:
-                                 self.style += _Style.from_color(
-                                     None, from_ansi(next(iter_codes))
-                                 )
-                             elif color_type == 2:
-                                 self.style += _Style.from_color(
-                                     None,
-                                     from_rgb(
-                                         next(iter_codes),
-                                         next(iter_codes),
-                                         next(iter_codes),
-                                     ),
-                                 )
-
-         return text
-
-
- if sys.platform != "win32" and __name__ == "__main__":  # pragma: no cover
-     import io
-     import os
-     import pty
-     import sys
-
-     decoder = AnsiDecoder()
-
-     stdout = io.BytesIO()
-
-     def read(fd: int) -> bytes:
-         data = os.read(fd, 1024)
-         stdout.write(data)
-         return data
-
-     pty.spawn(sys.argv[1:], read)
-
-     from .console import Console
-
-     console = Console(record=True)
-
-     stdout_result = stdout.getvalue().decode("utf-8")
-     print(stdout_result)
-
-     for line in decoder.decode(stdout_result):
-         console.print(line)
-
-     console.save_html("stdout.html")
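
A minimal sketch of the decoder's intended use, following the module's own __main__ block: feed raw ANSI-escaped terminal output through AnsiDecoder and print the resulting styled Text.

    from pip._vendor.rich.ansi import AnsiDecoder
    from pip._vendor.rich.console import Console

    console = Console()
    decoder = AnsiDecoder()
    # decode() yields one styled Text object per input line.
    for text in decoder.decode("\x1b[1;31mbold red\x1b[0m plain"):
        console.print(text)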
spaces/AutoLLM/AutoAgents/autoagents/agents/__init__.py DELETED
File without changes
spaces/Awesimo/jojogan/e4e/utils/model_utils.py DELETED
@@ -1,35 +0,0 @@
- import torch
- import argparse
- from models.psp import pSp
- from models.encoders.psp_encoders import Encoder4Editing
-
-
- def setup_model(checkpoint_path, device='cuda'):
-     ckpt = torch.load(checkpoint_path, map_location='cpu')
-     opts = ckpt['opts']
-
-     opts['checkpoint_path'] = checkpoint_path
-     opts['device'] = device
-     opts = argparse.Namespace(**opts)
-
-     net = pSp(opts)
-     net.eval()
-     net = net.to(device)
-     return net, opts
-
-
- def load_e4e_standalone(checkpoint_path, device='cuda'):
-     ckpt = torch.load(checkpoint_path, map_location='cpu')
-     opts = argparse.Namespace(**ckpt['opts'])
-     e4e = Encoder4Editing(50, 'ir_se', opts)
-     e4e_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')}
-     e4e.load_state_dict(e4e_dict)
-     e4e.eval()
-     e4e = e4e.to(device)
-     latent_avg = ckpt['latent_avg'].to(device)
-
-     def add_latent_avg(model, inputs, outputs):
-         return outputs + latent_avg.repeat(outputs.shape[0], 1, 1)
-
-     e4e.register_forward_hook(add_latent_avg)
-     return e4e
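
An illustrative call into the deleted helper. The import path assumes the e4e/ directory is on sys.path (so that the models package resolves), and the checkpoint path is hypothetical.

    from utils.model_utils import setup_model  # as laid out inside the e4e/ tree

    # setup_model returns the pSp network in eval mode on the chosen device,
    # plus the argparse.Namespace of options stored in the checkpoint.
    net, opts = setup_model('checkpoints/e4e_ffhq_encode.pt', device='cuda')
    print(type(net).__name__, opts.device)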
spaces/BLACKHOST/Banner/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Banner
- emoji: 🚀
- colorFrom: purple
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Aethersx2 2023 Apk.md DELETED
@@ -1,188 +0,0 @@
- <br />
- <h1>AetherSX2 2023 APK: Play PS2 Games on Your Android Device</h1>
- <p>Do you miss playing your favorite PS2 games but no longer have a console? Do you want to experience the nostalgia of classic titles like Final Fantasy X, God of War, Grand Theft Auto, Metal Gear Solid and more on your smartphone or tablet? If so, you may be interested in AetherSX2, a PS2 emulator for Android that lets you run PS2 games on your device with high performance and quality. In this article, we will tell you everything you need to know about the AetherSX2 2023 APK, including what it is, how to download and install it, how to play PS2 games on it, and what its pros and cons are.</p>
- <h2>What is AetherSX2?</h2>
- <h3>A PS2 emulator for Android</h3>
- <p>AetherSX2 is an emulator of the PS Two console for the Android platform. You can play games that you have dumped from disc on your portable device. A BIOS image is <strong>required</strong> to play and is not optional. This image must be dumped from your own console, using a homebrew application. We recommend biosdrain.</p>
- <h2>aethersx2 2023 apk</h2><br /><p><b><b>Download File</b> &#128505; <a href="https://bltlly.com/2v6MuG">https://bltlly.com/2v6MuG</a></b></p><br /><br />
- <h3>Features and requirements</h3>
- <p>AetherSX2 has many features that make it one of the best PS2 emulators for Android, such as:</p>
- <ul>
- <li>System simulation</li>
- <li>OpenGL, Vulkan and software rendering</li>
- <li>Upscaling of games to 1080p and beyond</li>
- <li>Widescreen patches for games without native support</li>
- <li>Save states</li>
- <li>Touchscreen and bluetooth controller support</li>
- <li>Games can be loaded from iso/chd/cso disc images</li>
- <li>Per-game settings</li>
- </ul>
- <p>However, AetherSX2 also has some requirements you must meet for it to run smoothly. You need a high-end device to achieve good performance. We recommend at least a Snapdragon 845 equivalent device. That means 4 big cores (Cortex-A75 level, 500 or higher single-core Geekbench 5 score).</p>
- <h2>How to download and install the AetherSX2 APK?</h2>
- <h3>Download from APKCombo</h3>
- <p>Once you have downloaded the AetherSX2 APK file, you need to install it on your device. To do this, you must enable the installation of apps from unknown sources in your device settings. This will let you install apps that are not from the Google Play Store. To enable this option, follow these steps:</p>
- <ol>
- <li>Go to your device settings and tap Security or Privacy.</li>
- <li>Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.</li>
- <li>Confirm your choice by tapping OK or Allow.</li>
- </ol>
- <p>You can now install the AetherSX2 APK file by following these steps:</p>
- <ol>
- <li>Locate the AetherSX2 APK file in your device storage using a file manager app.</li>
- <li>Tap the file and select Install.</li>
- <li>Wait for the installation to finish and tap Open or Done.</li>
- </ol>
- <h3>Grant permissions and run the app</h3>
- <p>The first time you launch the AetherSX2 app, you will need to grant some permissions for it to work properly. These permissions include:</p>
- <ul>
- <li>Storage: to access game files and save states.</li>
- <li>Camera: to scan QR codes for downloading games.</li>
- <li>Microphone: to use voice chat in online multiplayer games.</li>
- </ul>
- <p>To grant these permissions, follow these steps:</p>
- <ol>
- <li>Tap the AetherSX2 app icon on your home screen or in the app drawer.</li>
- <li>You will see a pop-up asking for permissions. Tap Allow or Accept for each one.</li>
- <li>If you do not see the pop-up, go to your device settings and tap Apps or Applications.</li>
- <li>Find and tap AetherSX2, then tap Permissions.</li>
- <li>Toggle on the permissions you want to grant.</li>
- </ol>
- <p>You are now ready to use the AetherSX2 app and play PS2 games on your Android device.</p>
- <h2>How to play PS2 games on AetherSX2?</h2>
- <h3>Dump your own PS2 games</h3>
-
- <ol>
- <li>Download biosdrain from here: <a href=">biosdrain - GitHub - biosdrain is a homebrew application for dumping PlayStation 2 discs and BIOS images - github.com - Free - Software for PlayStation 2 GitHub Search</a>.</li>
- <li>Burn biosdrain to a CD-R using software such as ImgBurn or Nero.</li>
- <li>Insert the biosdrain CD-R into your PS2 console and turn it on.</li>
- <li>You will see a menu with two options: Dump BIOS and Dump Disc.</li>
- <li>Select Dump BIOS and follow the on-screen instructions. You will need a USB flash drive or an external hard drive formatted as FAT32 to store the BIOS image.</li>
- <li>Once the BIOS image is dumped, remove the biosdrain CD-R and insert the PS2 game disc you want to dump.</li>
- <li>Select Dump Disc and follow the on-screen instructions. You will need another USB flash drive or external hard drive formatted as FAT32 to store the game image.</li>
- <li>Repeat this process for every PS2 game you want to dump.</li>
- </ol>
- <h3>Copy the game files to your device</h3>
- <p>The next thing you need to do is copy the game files you have dumped from your PS2 discs to your Android device. To do this, you can use a USB cable, a wireless transfer app or a cloud storage service. The game files must be in iso/chd/cso format, which are compressed disc images that AetherSX2 can read. To copy the game files to your device, follow these steps:</p>
- <p></p>
- <ol>
- <li>Connect the USB flash drive or external hard drive containing the game files to your computer using a USB cable or an adapter.</li>
- <li>Open a file manager app on your computer and locate the game files you want to copy. They should be in a folder named PS2ISO or similar.</li>
- <li>Select the game files you want to copy and copy them to the clipboard using a keyboard shortcut or a right-click menu.</li>
-
- <li>Open a file manager app on your Android device and go to the folder where you want to store the game files. You can create a new folder named AetherSX2 or similar.</li>
- <li>Paste the game files from the clipboard into the folder on your device using a keyboard shortcut or a right-click menu.</li>
- <li>Disconnect your Android device and your USB flash drive or external hard drive from your computer.</li>
- </ol>
- <h3>Load the game from the app</h3>
- <p>The last thing you need to do is load the game you want to play from the AetherSX2 app. To do this, follow these steps:</p>
- <ol>
- <li>Launch the AetherSX2 app on your Android device and tap the menu icon in the top-left corner.</li>
- <li>Tap Settings and then tap BIOS. Locate and select the BIOS image you dumped from your PS2 console. Tap OK to confirm.</li>
- <li>Go back to the main menu and tap Games. You will see a list of games available in your device storage. Tap the game you want to play.</li>
- <li>The game will start loading and you will see a loading screen with information about the game. Wait for the game to load completely.</li>
- <li>You can now play the game using the touchscreen controls or a bluetooth controller. You can also adjust the settings, save and load states, and access other features from the in-game menu.</li>
- </ol>
- <h3>Adjust the settings and enjoy</h3>
- <p>AetherSX2 has many settings you can adjust to improve your gaming experience. You can change the rendering mode, resolution, aspect ratio, frame rate, audio quality, controller layout and more. To access the settings, follow these steps:</p>
- <ol>
- <li>While playing a game, tap the menu icon in the top-right corner.</li>
- <li>Tap Settings and then tap the category you want to modify. You will see a list of options you can change.</li>
-
- <li>Tap OK or Apply to save your changes and return to the game.</li>
- </ol>
- <p>You can also access some quick settings by tapping the gear icon in the bottom-right corner of the screen. You can toggle full-screen mode, mute the sound, enable cheats, take screenshots and more from there.</p>
- <p>You can now enjoy playing PS2 games on your Android device with AetherSX2. Have fun!</p>
- <h2>Pros and cons of AetherSX2</h2>
- <h4>Pros</h4>
- <p>AetherSX2 has many advantages that make it a great choice for PS2 emulation on Android, such as:</p>
- <table>
- <tr>
- <th>AetherSX2</th>
- <th>DamonPS2</th>
- <th>Play!</th>
- </tr>
- <tr>
- <td>Free and open source</td>
- <td>Paid and proprietary</td>
- <td>Free and open source</td>
- </tr>
- <tr>
- <td>High compatibility and performance</td>
- <td>High compatibility and performance</td>
- <td>Low compatibility and performance</td>
- </tr>
- <tr>
- <td>No ads or in-app purchases</td>
- <td>Ads and in-app purchases</td>
- <td>No ads or in-app purchases</td>
- </tr>
- <tr>
- <td>Frequent updates and bug fixes</td>
- <td>Rare updates and bug fixes</td>
- <td>Frequent updates and bug fixes</td>
- </tr>
- <tr>
- <td>User-friendly interface and features</td>
- <td>User-friendly interface and features</td>
- <td>Simplistic interface and features</td>
- </tr>
- <tr>
- <td>No internet connection required</td>
- <td>Internet connection required for license verification</td>
- <td>No internet connection required</td>
- </tr>
- <tr>
- <td>No DRM or anti-piracy measures</td>
- <td>DRM and anti-piracy measures that may harm your device or data</td>
- <td>No DRM or anti-piracy measures</td>
- </tr>
- <tr>
- <td>Supports Vulkan and software rendering</td>
- <td>Only supports OpenGL</td>
- <td>Supports OpenGL and software rendering</td>
- </tr>
- </table>
- <p>As you can see, AetherSX2 has many benefits over other PS2 emulators for Android, making it a superior choice for PS2 fans.</p>
- <h4>Cons</h4>
-
- <ul>
- <li>It requires a high-end device to run smoothly. If you have a low-end or mid-range device, you may experience lag, glitches or crashes.</li>
- <li>It does not support online or network multiplayer features. You can only play offline or local multiplayer games.</li>
- <li>It does not have a built-in game library or downloader. You have to dump your own games and copy them to your device manually.</li>
- <li>It may not be compatible with some games or devices. Some games may not work, or may have graphical or audio issues.</li>
- <li>It may violate some laws or regulations in your country or region. You should check the legal status of emulation and game dumping before using AetherSX2.</li>
- </ul>
- <p>These are some of the drawbacks of AetherSX2 that you should consider before using it.</p>
- <h2>Conclusion</h2>
- <p>AetherSX2 is a PS2 emulator for Android that lets you play PS2 games on your device with high performance and quality. It has many features and advantages over other PS2 emulators for Android, such as being free, open source, ad-free, easy to use and compatible with most games and devices. However, it also has some requirements and limitations you should keep in mind, such as needing a high-end device, a BIOS image and your own game files. You should also check the legal status of emulation and game dumping in your country or region before using AetherSX2.</p>
- <p>If you are a PS2 fan and want to relive the nostalgia of playing your favorite games on your Android device, AetherSX2 is a great option for you. You can easily download and install it from APKCombo, and follow the steps in this article to set it up and play PS2 games on it. You can also adjust the settings to suit your preferences and enjoy the best PS2 gaming experience on your device.</p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions about AetherSX2:</p>
- <ol>
- <li><strong>Is AetherSX2 safe to use?</strong></li>
-
- <li><strong>Is AetherSX2 legal to use?</strong></li>
- <p>AetherSX2 is legal to use as long as you follow fair-use rules and only play games that you own and have purchased. You should not download or distribute pirated games or BIOS images, as they may violate the intellectual property rights of game developers and publishers. You should also check the laws and regulations of your country or region regarding emulation and game dumping before using AetherSX2.</p>
- <li><strong>How can I improve the performance of AetherSX2?</strong></li>
- <p>You can improve the performance of AetherSX2 by following these tips:</p>
- <ul>
- <li>Use a high-end device with a powerful processor and enough RAM.</li>
- <li>Use the Vulkan rendering mode if your device supports it.</li>
- <li>Lower the resolution and frame rate if you experience lag or stuttering.</li>
- <li>Close other apps running in the background that may consume resources.</li>
- <li>Keep your device cool and avoid overheating.</li>
- </ul>
- <li><strong>How can I fix graphical or audio issues in AetherSX2?</strong></li>
- <p>You can fix graphical or audio issues in AetherSX2 by following these tips:</p>
- <ul>
- <li>Use the software rendering mode if Vulkan or OpenGL causes glitches or artifacts.</li>
- <li>Enable or disable widescreen patches depending on the game's native aspect ratio.</li>
- <li>Adjust the audio latency and buffer size settings if you hear crackling or popping sounds.</li>
- <li>Update your device's drivers and firmware if they are outdated.</li>
- <li>Contact the AetherSX2 developers if you find any bugs or errors that need fixing.</li>
- </ul>
- <li><strong>Where can I get more information and support for AetherSX2?</strong></li>
-
- <p>I hope you found this article helpful and informative. If you have any questions or comments, feel free to leave them below. Thank you for reading and happy gaming!</p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Camioneros De Europa 3 Mod Apk Dinero Ilimitado Ios.md DELETED
@@ -1,56 +0,0 @@
- <br />
- <h1>Truckers of Europe 3 Mod APK Unlimited Money IOS: A Review</h1>
- <p>Do you love driving trucks and exploring different countries across Europe? Do you want to experience the thrill and challenge of being a truck driver in realistic scenarios? If so, then you should try Truckers of Europe 3, a truck simulator game that lets you drive various trucks with different trailers all over Europe. And if you want to make your game more fun and exciting, you should download Truckers of Europe 3 Mod APK Unlimited Money IOS, a modified version of the game that gives you unlimited money and access to all features. In this article, we will review this modified version and show you how to download and install it on your IOS device. We will also share some tips and tricks for playing the game and answer some frequently asked questions.</p>
- <h2>What is Truckers of Europe 3?</h2>
-
- <h2>Why download Truckers of Europe 3 Mod APK Unlimited Money IOS?</h2>
- <p>Truckers of Europe 3 is a great game that offers plenty of fun and entertainment for truck lovers. However, it also has some limitations and drawbacks that can affect your enjoyment. For example, you need to earn in-game money to buy new trucks, trailers, upgrades, customizations, and so on. This can be slow and tedious. You also have to follow traffic rules and speed limits to avoid fines and penalties. This can be frustrating and annoying. You also have to keep an eye on your fuel level, damage level, fatigue level, and so on. This can be stressful and challenging.</p>
- <h2>truckers of europe 3 mod apk unlimited money ios</h2><br /><p><b><b>Download</b> &rArr;&rArr;&rArr; <a href="https://bltlly.com/2v6L3o">https://bltlly.com/2v6L3o</a></b></p><br /><br />
- <p>That is why you should download Truckers of Europe 3 Mod APK Unlimited Money IOS, a modified version of the game that gives you unlimited money and access to all features. With this modified version, you can enjoy the following benefits and advantages:</p>
- <ul>
- <li>You can buy any truck, trailer, upgrade, customization, etc. without worrying about the cost.</li>
- <li>You can drive as fast as you want without worrying about speed limits or fines.</li>
- <li>You can ignore traffic rules and drive recklessly without worrying about penalties or accidents.</li>
- <li>You can refuel your truck at any time without worrying about the fuel level or cost.</li>
- <li>You can repair your truck at any time without worrying about the damage level or cost.</li>
- <li>You can rest at any time without worrying about the fatigue level or time.</li>
- <li>You can unlock all achievements and trophies without any effort.</li>
- </ul>
- <h2>How to download and install Truckers of Europe 3 Mod APK Unlimited Money IOS?</h2>
- <p>Downloading and installing Truckers of Europe 3 Mod APK Unlimited Money IOS is very easy and simple. You just have to follow these steps:</p>
- <ol>
- <li>Click this link to download the modified version of the game: [Download Truckers of Europe 3 Mod APK Unlimited Money IOS].</li>
-
- <li>Follow the on-screen instructions and allow the necessary permissions.</li>
- <li>Wait for the installation to complete and then launch the game.</li>
- <li>Enjoy playing Truckers of Europe 3 Mod APK Unlimited Money IOS with unlimited money and access to all features.</li>
- </ol>
- <p>Note: You may need to enable the installation of apps from unknown sources in your device settings before installing the modified version. You may also need to uninstall the original version of the game if you have it installed on your device.</p>
- <h2>Tips and tricks for playing Truckers of Europe 3 Mod APK Unlimited Money IOS</h2>
- <p>Truckers of Europe 3 Mod APK Unlimited Money IOS is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and difficult at times. That is why we have prepared some tips and tricks to help you improve your skills and enjoy the game more. Here are some of them:</p>
- <ul>
- <li>Use the GPS navigation and the map to plan your route and avoid getting lost or stuck.</li>
- <li>Check the weather forecast and adjust your driving accordingly. Avoid driving in bad weather conditions such as rain, snow, fog, etc.</li>
- <li>Use the mirrors and indicators to check your surroundings and signal your intentions. Be careful when changing lanes, overtaking, turning, parking, etc.</li>
- <li>Follow the traffic rules and speed limits to avoid fines and penalties. However, you can also break them if you want to have some fun.</li>
- <li>Keep an eye on your fuel level, damage level, fatigue level, etc. Refuel, repair and rest when necessary. However, you can also ignore them if you want to play without limitations.</li>
- <li>Customize and upgrade your truck with various paint jobs, accessories, lights, horns, etc. Make your truck look unique and impressive.</li>
- <li>Try different trucks, trailers, cargoes, countries, game modes, difficulty levels, etc. Explore the variety and diversity of the game.</li>
-
- </ul>
- <h2>Conclusion</h2>
- <p>or time. You can also unlock all achievements and trophies without any effort. You can also enjoy playing the online multiplayer mode with other players from all over the world. Truckers of Europe 3 Mod APK Unlimited Money IOS is a fun and exciting game that will make you feel like a real truck driver in Europe. Download it now and enjoy the ride!</p>
- <p></p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions and answers about Truckers of Europe 3 Mod APK Unlimited Money IOS:</p>
- <h3>Q: Is Truckers of Europe 3 Mod APK Unlimited Money IOS safe to download and install?</h3>
- <p>A: Yes, Truckers of Europe 3 Mod APK Unlimited Money IOS is safe to download and install. It does not contain any viruses, malware or spyware that could harm your device or data. However, you should always download it from a trusted source and scan it with an antivirus program before installing it.</p>
- <h3>Q: Is Truckers of Europe 3 Mod APK Unlimited Money IOS compatible with my device?</h3>
- <p>A: Truckers of Europe 3 Mod APK Unlimited Money IOS is compatible with most IOS devices running IOS 9.0 or higher. However, some older devices may experience performance issues or crashes due to the game's demanding graphics and physics. You can check your device's compatibility on the download page or the game's official website.</p>
- <h3>Q: How can I update Truckers of Europe 3 Mod APK Unlimited Money IOS?</h3>
- <p>A: Truckers of Europe 3 Mod APK Unlimited Money IOS is updated regularly by the developers to fix bugs, improve features, and add new content. You can check for updates on the download page or the game's official website. You can also enable automatic updates in your device settings to get the latest version of the game as soon as it is available.</p>
- <h3>Q: How can I contact the developers of Truckers of Europe 3 Mod APK Unlimited Money IOS?</h3>
-
- <h3>Q: How can I support the developers of Truckers of Europe 3 Mod APK Unlimited Money IOS?</h3>
- <p>A: You can support the developers of Truckers of Europe 3 Mod APK Unlimited Money IOS by rating and reviewing the game on the download page or the game's official website. You can also share the game with your friends and family on social media or other platforms. You can also buy some in-game items or premium features to support their work and development.</p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Fifa Mobile Ftbol Mod Apk Dinero Ilimitado.md DELETED
@@ -1,52 +0,0 @@
1
-
2
- <h1>How to download FIFA Mobile Soccer Mod APK Unlimited Money</h1>
3
- <p>If you are a fan of soccer games, you must have heard of FIFA Mobile Soccer, one of the most popular and realistic soccer games on mobile devices. Developed by EA Sports, FIFA Mobile Soccer lets you build your ultimate team of soccer stars, compete in various modes, and experience the thrill of the FIFA World Cup.</p>
4
- <p>However, as much as you enjoy playing FIFA Mobile Soccer, you may also feel frustrated by the limited amount of money and coins you have in the game. Money and coins are essential resources that let you buy players, upgrade your team, unlock new features, and much more. Without enough money and coins, you may not be able to enjoy the game's full potential.</p>
5
- <h2>download fifa mobile soccer mod apk unlimited money</h2><br /><p><b><b>Download Zip</b> &#9913;&#9913;&#9913; <a href="https://bltlly.com/2v6Jd8">https://bltlly.com/2v6Jd8</a></b></p><br /><br />
6
- <p>That is why many players are looking for ways to download FIFA Mobile Soccer Mod APK Unlimited Money, a modified version of the game that gives you access to unlimited money and coins, as well as other features that enhance your gaming experience. In this article, we will show you how to download FIFA Mobile Soccer Mod APK Unlimited Money, what its features are, and how to install it on your device.</p>
7
- <h2>Features of FIFA Mobile Soccer Mod APK</h2>
8
- <p>FIFA Mobile Soccer Mod APK is not just the regular version of the game with unlimited money and coins. It also has many other features that make it more fun and exciting to play. Here are some of the features of FIFA Mobile Soccer Mod APK:</p>
9
- <h3>All players and teams unlocked</h3>
10
- <p>With FIFA Mobile Soccer Mod APK, you can unlock all players and teams in the game, including those that are exclusive to certain events or seasons. You can choose from more than 15,000 authentic soccer stars from over 600 teams, including Chelsea, Paris SG, Real Madrid, Liverpool, Juventus, and more. You can also create your own custom team with your favorite players.</p>
11
- <h3>Unlimited money and coins</h3>
12
-
13
- <h3>Menu mod with customization options</h3>
14
- <p>FIFA Mobile Soccer Mod APK also comes with a mod menu that gives you more control over the game. You can open the mod menu by tapping a floating icon on the screen. From there, you can customize various aspects of the game, such as:</p>
15
- <ul>
16
- <li>The difficulty level of the game</li>
17
- <li>The game speed</li>
18
- <li>The size of the players</li>
19
- <li>The camera angle</li>
20
- <li>The weather conditions</li>
21
- <li>The sound effects</li>
22
- <li>The graphics quality</li>
23
- </ul> <h3>High-quality graphics and sound effects</h3>
24
- <p>FIFA Mobile Soccer Mod APK also improves the game's graphics and sound effects, making it more realistic and immersive. You can enjoy the stunning visuals of the stadiums, the players, the ball, and the animations. You can also hear the cheers of the crowd, the announcers' commentary, and the sound of the ball hitting the net.</p>
25
- <h2>How to download and install FIFA Mobile Soccer Mod APK</h2>
26
- <p>Now that you know the features of FIFA Mobile Soccer Mod APK, you may be wondering how to download and install it on your device. Don't worry, it is very easy and simple. Just follow these steps:</p>
27
- <h3>Step 1: Enable unknown sources on your device</h3>
28
- <p>Before you can install FIFA Mobile Soccer Mod APK, you need to enable unknown sources on your device. This allows you to install apps that do not come from the official Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn the option on to allow unknown sources.</p>
29
- <p></p>
30
- <h3>Step 2: Download the FIFA Mobile Soccer Mod APK file from a trusted source</h3>
31
-
32
- <h3>Step 3: Locate and install the APK file on your device</h3>
33
- <p>After downloading the FIFA Mobile Soccer Mod APK file, you need to locate and install it on your device. To do this, open your file manager app and find the folder where you saved the APK file. Tap the file to start the installation process. You may see a pop-up asking for permission to install the app. Just tap install and wait a few seconds until the installation is complete.</p>
34
- <h3>Step 4: Launch the game and enjoy unlimited money</h3>
35
- <p>Congratulations! You have successfully installed FIFA Mobile Soccer Mod APK on your device. You can now launch the game and enjoy unlimited money and other features. You can open the mod menu by tapping a floating icon on the screen. From there, you can customize various aspects of the game as you like.</p>
36
- <h2>Conclusion</h2>
37
- <p>FIFA Mobile Soccer is one of the best soccer games on mobile devices. It offers a realistic and exciting soccer experience with high-quality graphics and sound effects. However, if you want to enjoy the game even more, you should download FIFA Mobile Soccer Mod APK Unlimited Money. This modified version of the game gives you access to unlimited money and coins, as well as other features that enhance your gaming experience. You can unlock all players and teams, customize the game settings, and have even more fun playing FIFA Mobile Soccer.</p>
38
- <p>If you want to download FIFA Mobile Soccer Mod APK Unlimited Money, just follow the steps we have provided in this article. It is very easy and simple. Just make sure you download FIFA Mobile Soccer Mod APK from a trusted source like [this link]. That way, you will get a safe and working version of FIFA Mobile Soccer Mod APK.</p>
39
- <p>So, what are you waiting for? Download FIFA Mobile Soccer Mod APK Unlimited Money now and enjoy playing soccer like never before!</p>
40
- <h2>Frequently Asked Questions</h2>
41
- <h3>Is FIFA Mobile Soccer Mod APK safe to download and use?</h3>
42
-
43
- <h3>Do I need to root my device to use FIFA Mobile Soccer Mod APK?</h3>
44
- <p>No, you do not need to root your device to use FIFA Mobile Soccer Mod APK. The modified version of the game works fine on both rooted and non-rooted devices.</p>
45
- <h3>Can I play online with FIFA Mobile Soccer Mod APK?</h3>
46
- <p>Yes, you can play online with FIFA Mobile Soccer Mod APK. However, you may run into issues or errors when playing online against players who are using the original version of the game. To avoid this, we suggest playing offline or with other players who are also using FIFA Mobile Soccer Mod APK.</p>
47
- <h3>How can I update FIFA Mobile Soccer Mod APK?</h3>
48
- <p>To update FIFA Mobile Soccer Mod APK, you need to download the latest version of the modded game from the same source where you downloaded the previous version. You can check for updates by visiting [this link] regularly. Once you have downloaded the latest version of FIFA Mobile Soccer Mod APK, uninstall the previous version and install the new one following the same steps we have provided in this article.</p>
49
- <h3>Where can I find more modded games like FIFA Mobile Soccer?</h3>
50
- <p>If you are looking for more modded games like FIFA Mobile Soccer, you can visit [this website]. This is a reliable and trustworthy source that offers a wide range of modded games across various genres and platforms. You can find action, adventure, racing, sports, simulation, strategy games, and more. You can also request modded games that are not available on the website.</p><br />
51
- <br />
52
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/main.py DELETED
@@ -1,79 +0,0 @@
1
- """Primary application entrypoint.
2
- """
3
- import locale
4
- import logging
5
- import os
6
- import sys
7
- import warnings
8
- from typing import List, Optional
9
-
10
- from pip._internal.cli.autocompletion import autocomplete
11
- from pip._internal.cli.main_parser import parse_command
12
- from pip._internal.commands import create_command
13
- from pip._internal.exceptions import PipError
14
- from pip._internal.utils import deprecation
15
-
16
- logger = logging.getLogger(__name__)
17
-
18
-
19
- # Do not import and use main() directly! Using it directly is actively
20
- # discouraged by pip's maintainers. The name, location and behavior of
21
- # this function is subject to change, so calling it directly is not
22
- # portable across different pip versions.
23
-
24
- # In addition, running pip in-process is unsupported and unsafe. This is
25
- # elaborated in detail at
26
- # https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program.
27
- # That document also provides suggestions that should work for nearly
28
- # all users that are considering importing and using main() directly.
29
-
30
- # However, we know that certain users will still want to invoke pip
31
- # in-process. If you understand and accept the implications of using pip
32
- # in an unsupported manner, the best approach is to use runpy to avoid
33
- # depending on the exact location of this entry point.
34
-
35
- # The following example shows how to use runpy to invoke pip in that
36
- # case:
37
- #
38
- # sys.argv = ["pip", your, args, here]
39
- # runpy.run_module("pip", run_name="__main__")
40
- #
41
- # Note that this will exit the process after running, unlike a direct
42
- # call to main. As it is not safe to do any processing after calling
43
- # main, this should not be an issue in practice.
44
-
45
-
46
- def main(args: Optional[List[str]] = None) -> int:
47
- if args is None:
48
- args = sys.argv[1:]
49
-
50
- # Suppress the pkg_resources deprecation warning
51
- # Note - we use a module of .*pkg_resources to cover
52
- # the normal case (pip._vendor.pkg_resources) and the
53
- # devendored case (a bare pkg_resources)
54
- warnings.filterwarnings(
55
- action="ignore", category=DeprecationWarning, module=".*pkg_resources"
56
- )
57
-
58
- # Configure our deprecation warnings to be sent through loggers
59
- deprecation.install_warning_logger()
60
-
61
- autocomplete()
62
-
63
- try:
64
- cmd_name, cmd_args = parse_command(args)
65
- except PipError as exc:
66
- sys.stderr.write(f"ERROR: {exc}")
67
- sys.stderr.write(os.linesep)
68
- sys.exit(1)
69
-
70
- # Needed for locale.getpreferredencoding(False) to work
71
- # in pip._internal.utils.encoding.auto_decode
72
- try:
73
- locale.setlocale(locale.LC_ALL, "")
74
- except locale.Error as e:
75
- # setlocale can apparently crash if locale are uninitialized
76
- logger.debug("Ignoring error %s when setting locale", e)
77
- command = create_command(cmd_name, isolated=("--isolated" in cmd_args))
78
-
79
- return command.main(cmd_args)
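For reference, the supported in-process invocation that the comments in this deleted module describe looks like the following. This is a minimal sketch following those comments, not part of the original file; the package name "requests" is purely illustrative, and `runpy.run_module` exits the interpreter when pip finishes, just like a direct command-line call:

```python
import runpy
import sys

# Equivalent to running "pip install requests" from the command line.
# This avoids importing pip's main() directly, which the maintainers
# actively discourage; the process exits once pip completes.
sys.argv = ["pip", "install", "requests"]
runpy.run_module("pip", run_name="__main__")
```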
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/uninstall.py DELETED
@@ -1,113 +0,0 @@
1
- import logging
2
- from optparse import Values
3
- from typing import List
4
-
5
- from pip._vendor.packaging.utils import canonicalize_name
6
-
7
- from pip._internal.cli import cmdoptions
8
- from pip._internal.cli.base_command import Command
9
- from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root
10
- from pip._internal.cli.status_codes import SUCCESS
11
- from pip._internal.exceptions import InstallationError
12
- from pip._internal.req import parse_requirements
13
- from pip._internal.req.constructors import (
14
- install_req_from_line,
15
- install_req_from_parsed_requirement,
16
- )
17
- from pip._internal.utils.misc import (
18
- check_externally_managed,
19
- protect_pip_from_modification_on_windows,
20
- )
21
-
22
- logger = logging.getLogger(__name__)
23
-
24
-
25
- class UninstallCommand(Command, SessionCommandMixin):
26
- """
27
- Uninstall packages.
28
-
29
- pip is able to uninstall most installed packages. Known exceptions are:
30
-
31
- - Pure distutils packages installed with ``python setup.py install``, which
32
- leave behind no metadata to determine what files were installed.
33
- - Script wrappers installed by ``python setup.py develop``.
34
- """
35
-
36
- usage = """
37
- %prog [options] <package> ...
38
- %prog [options] -r <requirements file> ..."""
39
-
40
- def add_options(self) -> None:
41
- self.cmd_opts.add_option(
42
- "-r",
43
- "--requirement",
44
- dest="requirements",
45
- action="append",
46
- default=[],
47
- metavar="file",
48
- help=(
49
- "Uninstall all the packages listed in the given requirements "
50
- "file. This option can be used multiple times."
51
- ),
52
- )
53
- self.cmd_opts.add_option(
54
- "-y",
55
- "--yes",
56
- dest="yes",
57
- action="store_true",
58
- help="Don't ask for confirmation of uninstall deletions.",
59
- )
60
- self.cmd_opts.add_option(cmdoptions.root_user_action())
61
- self.cmd_opts.add_option(cmdoptions.override_externally_managed())
62
- self.parser.insert_option_group(0, self.cmd_opts)
63
-
64
- def run(self, options: Values, args: List[str]) -> int:
65
- session = self.get_default_session(options)
66
-
67
- reqs_to_uninstall = {}
68
- for name in args:
69
- req = install_req_from_line(
70
- name,
71
- isolated=options.isolated_mode,
72
- )
73
- if req.name:
74
- reqs_to_uninstall[canonicalize_name(req.name)] = req
75
- else:
76
- logger.warning(
77
- "Invalid requirement: %r ignored -"
78
- " the uninstall command expects named"
79
- " requirements.",
80
- name,
81
- )
82
- for filename in options.requirements:
83
- for parsed_req in parse_requirements(
84
- filename, options=options, session=session
85
- ):
86
- req = install_req_from_parsed_requirement(
87
- parsed_req, isolated=options.isolated_mode
88
- )
89
- if req.name:
90
- reqs_to_uninstall[canonicalize_name(req.name)] = req
91
- if not reqs_to_uninstall:
92
- raise InstallationError(
93
- f"You must give at least one requirement to {self.name} (see "
94
- f'"pip help {self.name}")'
95
- )
96
-
97
- if not options.override_externally_managed:
98
- check_externally_managed()
99
-
100
- protect_pip_from_modification_on_windows(
101
- modifying_pip="pip" in reqs_to_uninstall
102
- )
103
-
104
- for req in reqs_to_uninstall.values():
105
- uninstall_pathset = req.uninstall(
106
- auto_confirm=options.yes,
107
- verbose=self.verbosity > 0,
108
- )
109
- if uninstall_pathset:
110
- uninstall_pathset.commit()
111
- if options.root_user_action == "warn":
112
- warn_if_run_as_root()
113
- return SUCCESS
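Because running pip in-process is unsupported (see the note in `cli/main.py` above), this uninstall command is normally driven through a subprocess. A minimal sketch, not part of the original file; "requests" is an illustrative package name, and `--yes` maps to the `-y`/`--yes` option defined above, skipping the confirmation prompt:

```python
import subprocess
import sys

# Run "pip uninstall --yes requests" against the current interpreter's pip.
subprocess.run(
    [sys.executable, "-m", "pip", "uninstall", "--yes", "requests"],
    check=True,
)
```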
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py DELETED
@@ -1,698 +0,0 @@
1
- """Imported from the recipes section of the itertools documentation.
2
-
3
- All functions taken from the recipes section of the itertools library docs
4
- [1]_.
5
- Some backward-compatible usability improvements have been made.
6
-
7
- .. [1] http://docs.python.org/library/itertools.html#recipes
8
-
9
- """
10
- import warnings
11
- from collections import deque
12
- from itertools import (
13
- chain,
14
- combinations,
15
- count,
16
- cycle,
17
- groupby,
18
- islice,
19
- repeat,
20
- starmap,
21
- tee,
22
- zip_longest,
23
- )
24
- import operator
25
- from random import randrange, sample, choice
26
-
27
- __all__ = [
28
- 'all_equal',
29
- 'before_and_after',
30
- 'consume',
31
- 'convolve',
32
- 'dotproduct',
33
- 'first_true',
34
- 'flatten',
35
- 'grouper',
36
- 'iter_except',
37
- 'ncycles',
38
- 'nth',
39
- 'nth_combination',
40
- 'padnone',
41
- 'pad_none',
42
- 'pairwise',
43
- 'partition',
44
- 'powerset',
45
- 'prepend',
46
- 'quantify',
47
- 'random_combination_with_replacement',
48
- 'random_combination',
49
- 'random_permutation',
50
- 'random_product',
51
- 'repeatfunc',
52
- 'roundrobin',
53
- 'sliding_window',
54
- 'tabulate',
55
- 'tail',
56
- 'take',
57
- 'triplewise',
58
- 'unique_everseen',
59
- 'unique_justseen',
60
- ]
61
-
62
-
63
- def take(n, iterable):
64
- """Return first *n* items of the iterable as a list.
65
-
66
- >>> take(3, range(10))
67
- [0, 1, 2]
68
-
69
- If there are fewer than *n* items in the iterable, all of them are
70
- returned.
71
-
72
- >>> take(10, range(3))
73
- [0, 1, 2]
74
-
75
- """
76
- return list(islice(iterable, n))
77
-
78
-
79
- def tabulate(function, start=0):
80
- """Return an iterator over the results of ``func(start)``,
81
- ``func(start + 1)``, ``func(start + 2)``...
82
-
83
- *func* should be a function that accepts one integer argument.
84
-
85
- If *start* is not specified it defaults to 0. It will be incremented each
86
- time the iterator is advanced.
87
-
88
- >>> square = lambda x: x ** 2
89
- >>> iterator = tabulate(square, -3)
90
- >>> take(4, iterator)
91
- [9, 4, 1, 0]
92
-
93
- """
94
- return map(function, count(start))
95
-
96
-
97
- def tail(n, iterable):
98
- """Return an iterator over the last *n* items of *iterable*.
99
-
100
- >>> t = tail(3, 'ABCDEFG')
101
- >>> list(t)
102
- ['E', 'F', 'G']
103
-
104
- """
105
- return iter(deque(iterable, maxlen=n))
106
-
107
-
108
- def consume(iterator, n=None):
109
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
110
- entirely.
111
-
112
- Efficiently exhausts an iterator without returning values. Defaults to
113
- consuming the whole iterator, but an optional second argument may be
114
- provided to limit consumption.
115
-
116
- >>> i = (x for x in range(10))
117
- >>> next(i)
118
- 0
119
- >>> consume(i, 3)
120
- >>> next(i)
121
- 4
122
- >>> consume(i)
123
- >>> next(i)
124
- Traceback (most recent call last):
125
- File "<stdin>", line 1, in <module>
126
- StopIteration
127
-
128
- If the iterator has fewer items remaining than the provided limit, the
129
- whole iterator will be consumed.
130
-
131
- >>> i = (x for x in range(3))
132
- >>> consume(i, 5)
133
- >>> next(i)
134
- Traceback (most recent call last):
135
- File "<stdin>", line 1, in <module>
136
- StopIteration
137
-
138
- """
139
- # Use functions that consume iterators at C speed.
140
- if n is None:
141
- # feed the entire iterator into a zero-length deque
142
- deque(iterator, maxlen=0)
143
- else:
144
- # advance to the empty slice starting at position n
145
- next(islice(iterator, n, n), None)
146
-
147
-
148
- def nth(iterable, n, default=None):
149
- """Returns the nth item or a default value.
150
-
151
- >>> l = range(10)
152
- >>> nth(l, 3)
153
- 3
154
- >>> nth(l, 20, "zebra")
155
- 'zebra'
156
-
157
- """
158
- return next(islice(iterable, n, None), default)
159
-
160
-
161
- def all_equal(iterable):
162
- """
163
- Returns ``True`` if all the elements are equal to each other.
164
-
165
- >>> all_equal('aaaa')
166
- True
167
- >>> all_equal('aaab')
168
- False
169
-
170
- """
171
- g = groupby(iterable)
172
- return next(g, True) and not next(g, False)
173
-
174
-
175
- def quantify(iterable, pred=bool):
176
- """Return the how many times the predicate is true.
177
-
178
- >>> quantify([True, False, True])
179
- 2
180
-
181
- """
182
- return sum(map(pred, iterable))
183
-
184
-
185
- def pad_none(iterable):
186
- """Returns the sequence of elements and then returns ``None`` indefinitely.
187
-
188
- >>> take(5, pad_none(range(3)))
189
- [0, 1, 2, None, None]
190
-
191
- Useful for emulating the behavior of the built-in :func:`map` function.
192
-
193
- See also :func:`padded`.
194
-
195
- """
196
- return chain(iterable, repeat(None))
197
-
198
-
199
- padnone = pad_none
200
-
201
-
202
- def ncycles(iterable, n):
203
- """Returns the sequence elements *n* times
204
-
205
- >>> list(ncycles(["a", "b"], 3))
206
- ['a', 'b', 'a', 'b', 'a', 'b']
207
-
208
- """
209
- return chain.from_iterable(repeat(tuple(iterable), n))
210
-
211
-
212
- def dotproduct(vec1, vec2):
213
- """Returns the dot product of the two iterables.
214
-
215
- >>> dotproduct([10, 10], [20, 20])
216
- 400
217
-
218
- """
219
- return sum(map(operator.mul, vec1, vec2))
220
-
221
-
222
- def flatten(listOfLists):
223
- """Return an iterator flattening one level of nesting in a list of lists.
224
-
225
- >>> list(flatten([[0, 1], [2, 3]]))
226
- [0, 1, 2, 3]
227
-
228
- See also :func:`collapse`, which can flatten multiple levels of nesting.
229
-
230
- """
231
- return chain.from_iterable(listOfLists)
232
-
233
-
234
- def repeatfunc(func, times=None, *args):
235
- """Call *func* with *args* repeatedly, returning an iterable over the
236
- results.
237
-
238
- If *times* is specified, the iterable will terminate after that many
239
- repetitions:
240
-
241
- >>> from operator import add
242
- >>> times = 4
243
- >>> args = 3, 5
244
- >>> list(repeatfunc(add, times, *args))
245
- [8, 8, 8, 8]
246
-
247
- If *times* is ``None`` the iterable will not terminate:
248
-
249
- >>> from random import randrange
250
- >>> times = None
251
- >>> args = 1, 11
252
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
253
- [2, 4, 8, 1, 8, 4]
254
-
255
- """
256
- if times is None:
257
- return starmap(func, repeat(args))
258
- return starmap(func, repeat(args, times))
259
-
260
-
261
- def _pairwise(iterable):
262
- """Returns an iterator of paired items, overlapping, from the original
263
-
264
- >>> take(4, pairwise(count()))
265
- [(0, 1), (1, 2), (2, 3), (3, 4)]
266
-
267
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
268
-
269
- """
270
- a, b = tee(iterable)
271
- next(b, None)
272
- yield from zip(a, b)
273
-
274
-
275
- try:
276
- from itertools import pairwise as itertools_pairwise
277
- except ImportError:
278
- pairwise = _pairwise
279
- else:
280
-
281
- def pairwise(iterable):
282
- yield from itertools_pairwise(iterable)
283
-
284
- pairwise.__doc__ = _pairwise.__doc__
285
-
286
-
287
- def grouper(iterable, n, fillvalue=None):
288
- """Collect data into fixed-length chunks or blocks.
289
-
290
- >>> list(grouper('ABCDEFG', 3, 'x'))
291
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
292
-
293
- """
294
- if isinstance(iterable, int):
295
- warnings.warn(
296
- "grouper expects iterable as first parameter", DeprecationWarning
297
- )
298
- n, iterable = iterable, n
299
- args = [iter(iterable)] * n
300
- return zip_longest(fillvalue=fillvalue, *args)
301
-
302
-
303
- def roundrobin(*iterables):
304
- """Yields an item from each iterable, alternating between them.
305
-
306
- >>> list(roundrobin('ABC', 'D', 'EF'))
307
- ['A', 'D', 'E', 'B', 'F', 'C']
308
-
309
- This function produces the same output as :func:`interleave_longest`, but
310
- may perform better for some inputs (in particular when the number of
311
- iterables is small).
312
-
313
- """
314
- # Recipe credited to George Sakkis
315
- pending = len(iterables)
316
- nexts = cycle(iter(it).__next__ for it in iterables)
317
- while pending:
318
- try:
319
- for next in nexts:
320
- yield next()
321
- except StopIteration:
322
- pending -= 1
323
- nexts = cycle(islice(nexts, pending))
324
-
325
-
326
- def partition(pred, iterable):
327
- """
328
- Returns a 2-tuple of iterables derived from the input iterable.
329
- The first yields the items that have ``pred(item) == False``.
330
- The second yields the items that have ``pred(item) == True``.
331
-
332
- >>> is_odd = lambda x: x % 2 != 0
333
- >>> iterable = range(10)
334
- >>> even_items, odd_items = partition(is_odd, iterable)
335
- >>> list(even_items), list(odd_items)
336
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
337
-
338
- If *pred* is None, :func:`bool` is used.
339
-
340
- >>> iterable = [0, 1, False, True, '', ' ']
341
- >>> false_items, true_items = partition(None, iterable)
342
- >>> list(false_items), list(true_items)
343
- ([0, False, ''], [1, True, ' '])
344
-
345
- """
346
- if pred is None:
347
- pred = bool
348
-
349
- evaluations = ((pred(x), x) for x in iterable)
350
- t1, t2 = tee(evaluations)
351
- return (
352
- (x for (cond, x) in t1 if not cond),
353
- (x for (cond, x) in t2 if cond),
354
- )
355
-
356
-
357
- def powerset(iterable):
358
- """Yields all possible subsets of the iterable.
359
-
360
- >>> list(powerset([1, 2, 3]))
361
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
362
-
363
- :func:`powerset` will operate on iterables that aren't :class:`set`
364
- instances, so repeated elements in the input will produce repeated elements
365
- in the output. Use :func:`unique_everseen` on the input to avoid generating
366
- duplicates:
367
-
368
- >>> seq = [1, 1, 0]
369
- >>> list(powerset(seq))
370
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
371
- >>> from more_itertools import unique_everseen
372
- >>> list(powerset(unique_everseen(seq)))
373
- [(), (1,), (0,), (1, 0)]
374
-
375
- """
376
- s = list(iterable)
377
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
378
-
379
-
380
- def unique_everseen(iterable, key=None):
381
- """
382
- Yield unique elements, preserving order.
383
-
384
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
385
- ['A', 'B', 'C', 'D']
386
- >>> list(unique_everseen('ABBCcAD', str.lower))
387
- ['A', 'B', 'C', 'D']
388
-
389
- Sequences with a mix of hashable and unhashable items can be used.
390
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
391
-
392
- Remember that ``list`` objects are unhashable - you can use the *key*
393
- parameter to transform the list to a tuple (which is hashable) to
394
- avoid a slowdown.
395
-
396
- >>> iterable = ([1, 2], [2, 3], [1, 2])
397
- >>> list(unique_everseen(iterable)) # Slow
398
- [[1, 2], [2, 3]]
399
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
400
- [[1, 2], [2, 3]]
401
-
402
-     Similarly, you may want to convert unhashable ``set`` objects with
403
- ``key=frozenset``. For ``dict`` objects,
404
- ``key=lambda x: frozenset(x.items())`` can be used.
405
-
406
- """
407
- seenset = set()
408
- seenset_add = seenset.add
409
- seenlist = []
410
- seenlist_add = seenlist.append
411
- use_key = key is not None
412
-
413
- for element in iterable:
414
- k = key(element) if use_key else element
415
- try:
416
- if k not in seenset:
417
- seenset_add(k)
418
- yield element
419
- except TypeError:
420
- if k not in seenlist:
421
- seenlist_add(k)
422
- yield element
423
-
424
-
425
- def unique_justseen(iterable, key=None):
426
- """Yields elements in order, ignoring serial duplicates
427
-
428
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
429
- ['A', 'B', 'C', 'D', 'A', 'B']
430
- >>> list(unique_justseen('ABBCcAD', str.lower))
431
- ['A', 'B', 'C', 'A', 'D']
432
-
433
- """
434
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
435
-
436
-
437
- def iter_except(func, exception, first=None):
438
- """Yields results from a function repeatedly until an exception is raised.
439
-
440
- Converts a call-until-exception interface to an iterator interface.
441
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
442
- to end the loop.
443
-
444
- >>> l = [0, 1, 2]
445
- >>> list(iter_except(l.pop, IndexError))
446
- [2, 1, 0]
447
-
448
- Multiple exceptions can be specified as a stopping condition:
449
-
450
- >>> l = [1, 2, 3, '...', 4, 5, 6]
451
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
452
- [7, 6, 5]
453
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
454
- [4, 3, 2]
455
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
456
- []
457
-
458
- """
459
- try:
460
- if first is not None:
461
- yield first()
462
- while 1:
463
- yield func()
464
- except exception:
465
- pass
466
-
467
-
468
- def first_true(iterable, default=None, pred=None):
469
- """
470
- Returns the first true value in the iterable.
471
-
472
- If no true value is found, returns *default*
473
-
474
- If *pred* is not None, returns the first item for which
475
- ``pred(item) == True`` .
476
-
477
- >>> first_true(range(10))
478
- 1
479
- >>> first_true(range(10), pred=lambda x: x > 5)
480
- 6
481
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
482
- 'missing'
483
-
484
- """
485
- return next(filter(pred, iterable), default)
486
-
487
-
488
- def random_product(*args, repeat=1):
489
- """Draw an item at random from each of the input iterables.
490
-
491
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
492
- ('c', 3, 'Z')
493
-
494
- If *repeat* is provided as a keyword argument, that many items will be
495
- drawn from each iterable.
496
-
497
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
498
- ('a', 2, 'd', 3)
499
-
500
-     This is equivalent to taking a random selection from
501
-     ``itertools.product(*args, **kwargs)``.
502
-
503
- """
504
- pools = [tuple(pool) for pool in args] * repeat
505
- return tuple(choice(pool) for pool in pools)
506
-
507
-
508
- def random_permutation(iterable, r=None):
509
- """Return a random *r* length permutation of the elements in *iterable*.
510
-
511
- If *r* is not specified or is ``None``, then *r* defaults to the length of
512
- *iterable*.
513
-
514
- >>> random_permutation(range(5)) # doctest:+SKIP
515
- (3, 4, 0, 1, 2)
516
-
517
-     This is equivalent to taking a random selection from
518
- ``itertools.permutations(iterable, r)``.
519
-
520
- """
521
- pool = tuple(iterable)
522
- r = len(pool) if r is None else r
523
- return tuple(sample(pool, r))
524
-
525
-
526
- def random_combination(iterable, r):
527
- """Return a random *r* length subsequence of the elements in *iterable*.
528
-
529
- >>> random_combination(range(5), 3) # doctest:+SKIP
530
- (2, 3, 4)
531
-
532
-     This is equivalent to taking a random selection from
533
- ``itertools.combinations(iterable, r)``.
534
-
535
- """
536
- pool = tuple(iterable)
537
- n = len(pool)
538
- indices = sorted(sample(range(n), r))
539
- return tuple(pool[i] for i in indices)
540
-
541
-
542
- def random_combination_with_replacement(iterable, r):
543
- """Return a random *r* length subsequence of elements in *iterable*,
544
- allowing individual elements to be repeated.
545
-
546
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
547
- (0, 0, 1, 2, 2)
548
-
549
-     This is equivalent to taking a random selection from
550
- ``itertools.combinations_with_replacement(iterable, r)``.
551
-
552
- """
553
- pool = tuple(iterable)
554
- n = len(pool)
555
- indices = sorted(randrange(n) for i in range(r))
556
- return tuple(pool[i] for i in indices)
557
-
558
-
559
- def nth_combination(iterable, r, index):
560
- """Equivalent to ``list(combinations(iterable, r))[index]``.
561
-
562
- The subsequences of *iterable* that are of length *r* can be ordered
563
- lexicographically. :func:`nth_combination` computes the subsequence at
564
- sort position *index* directly, without computing the previous
565
- subsequences.
566
-
567
- >>> nth_combination(range(5), 3, 5)
568
- (0, 3, 4)
569
-
570
-     ``ValueError`` will be raised if *r* is negative or greater than the length
571
- of *iterable*.
572
- ``IndexError`` will be raised if the given *index* is invalid.
573
- """
574
- pool = tuple(iterable)
575
- n = len(pool)
576
- if (r < 0) or (r > n):
577
- raise ValueError
578
-
579
- c = 1
580
- k = min(r, n - r)
581
- for i in range(1, k + 1):
582
- c = c * (n - k + i) // i
583
-
584
- if index < 0:
585
- index += c
586
-
587
- if (index < 0) or (index >= c):
588
- raise IndexError
589
-
590
- result = []
591
- while r:
592
- c, n, r = c * r // n, n - 1, r - 1
593
- while index >= c:
594
- index -= c
595
- c, n = c * (n - r) // n, n - 1
596
- result.append(pool[-1 - n])
597
-
598
- return tuple(result)
599
-
600
-
601
- def prepend(value, iterator):
602
- """Yield *value*, followed by the elements in *iterator*.
603
-
604
- >>> value = '0'
605
- >>> iterator = ['1', '2', '3']
606
- >>> list(prepend(value, iterator))
607
- ['0', '1', '2', '3']
608
-
609
- To prepend multiple values, see :func:`itertools.chain`
610
- or :func:`value_chain`.
611
-
612
- """
613
- return chain([value], iterator)
614
-
615
-
616
- def convolve(signal, kernel):
617
- """Convolve the iterable *signal* with the iterable *kernel*.
618
-
619
- >>> signal = (1, 2, 3, 4, 5)
620
- >>> kernel = [3, 2, 1]
621
- >>> list(convolve(signal, kernel))
622
- [3, 8, 14, 20, 26, 14, 5]
623
-
624
- Note: the input arguments are not interchangeable, as the *kernel*
625
- is immediately consumed and stored.
626
-
627
- """
628
- kernel = tuple(kernel)[::-1]
629
- n = len(kernel)
630
- window = deque([0], maxlen=n) * n
631
- for x in chain(signal, repeat(0, n - 1)):
632
- window.append(x)
633
- yield sum(map(operator.mul, kernel, window))
634
-
635
-
636
- def before_and_after(predicate, it):
637
- """A variant of :func:`takewhile` that allows complete access to the
638
- remainder of the iterator.
639
-
640
- >>> it = iter('ABCdEfGhI')
641
- >>> all_upper, remainder = before_and_after(str.isupper, it)
642
- >>> ''.join(all_upper)
643
- 'ABC'
644
- >>> ''.join(remainder) # takewhile() would lose the 'd'
645
- 'dEfGhI'
646
-
647
- Note that the first iterator must be fully consumed before the second
648
- iterator can generate valid results.
649
- """
650
- it = iter(it)
651
- transition = []
652
-
653
- def true_iterator():
654
- for elem in it:
655
- if predicate(elem):
656
- yield elem
657
- else:
658
- transition.append(elem)
659
- return
660
-
661
- def remainder_iterator():
662
- yield from transition
663
- yield from it
664
-
665
- return true_iterator(), remainder_iterator()
666
-
667
-
668
- def triplewise(iterable):
669
- """Return overlapping triplets from *iterable*.
670
-
671
- >>> list(triplewise('ABCDE'))
672
- [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
673
-
674
- """
675
- for (a, _), (b, c) in pairwise(pairwise(iterable)):
676
- yield a, b, c
677
-
678
-
679
- def sliding_window(iterable, n):
680
- """Return a sliding window of width *n* over *iterable*.
681
-
682
- >>> list(sliding_window(range(6), 4))
683
- [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
684
-
685
- If *iterable* has fewer than *n* items, then nothing is yielded:
686
-
687
- >>> list(sliding_window(range(3), 4))
688
- []
689
-
690
- For a variant with more features, see :func:`windowed`.
691
- """
692
- it = iter(iterable)
693
- window = deque(islice(it, n), maxlen=n)
694
- if len(window) == n:
695
- yield tuple(window)
696
- for x in it:
697
- window.append(x)
698
- yield tuple(window)
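These recipes are also exposed through the `more_itertools` package itself, so they can be exercised directly. A small sketch, not part of the original file, computing a width-3 moving average two ways; the printed values assume the implementations shown above, and note that `convolve` pads with zeros, so it adds ramp-up and ramp-down terms at the edges that `sliding_window` does not produce:

```python
from statistics import mean

from more_itertools import convolve, sliding_window

data = [3, 5, 2, 8, 6]

# One average per full window of three items.
print([round(mean(w), 2) for w in sliding_window(data, 3)])
# [3.33, 5.0, 5.33]

# A uniform kernel gives the same interior values plus edge terms.
print([round(v, 2) for v in convolve(data, [1 / 3] * 3)])
# [1.0, 2.67, 3.33, 5.0, 5.33, 4.67, 2.0]
```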
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/easy_install.py DELETED
@@ -1,2312 +0,0 @@
1
- """
2
- Easy Install
3
- ------------
4
-
5
- A tool for doing automatic download/extract/build of distutils-based Python
6
- packages. For detailed documentation, see the accompanying EasyInstall.txt
7
- file, or visit the `EasyInstall home page`__.
8
-
9
- __ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
10
-
11
- """
12
-
13
- from glob import glob
14
- from distutils.util import get_platform
15
- from distutils.util import convert_path, subst_vars
16
- from distutils.errors import (
17
- DistutilsArgError, DistutilsOptionError,
18
- DistutilsError, DistutilsPlatformError,
19
- )
20
- from distutils import log, dir_util
21
- from distutils.command.build_scripts import first_line_re
22
- from distutils.spawn import find_executable
23
- from distutils.command import install
24
- import sys
25
- import os
26
- import zipimport
27
- import shutil
28
- import tempfile
29
- import zipfile
30
- import re
31
- import stat
32
- import random
33
- import textwrap
34
- import warnings
35
- import site
36
- import struct
37
- import contextlib
38
- import subprocess
39
- import shlex
40
- import io
41
- import configparser
42
- import sysconfig
43
-
44
-
45
- from sysconfig import get_path
46
-
47
- from setuptools import SetuptoolsDeprecationWarning
48
-
49
- from setuptools import Command
50
- from setuptools.sandbox import run_setup
51
- from setuptools.command import setopt
52
- from setuptools.archive_util import unpack_archive
53
- from setuptools.package_index import (
54
- PackageIndex, parse_requirement_arg, URL_SCHEME,
55
- )
56
- from setuptools.command import bdist_egg, egg_info
57
- from setuptools.wheel import Wheel
58
- from pkg_resources import (
59
- normalize_path, resource_string,
60
- get_distribution, find_distributions, Environment, Requirement,
61
- Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
62
- VersionConflict, DEVELOP_DIST,
63
- )
64
- import pkg_resources
65
- from .._path import ensure_directory
66
- from ..extern.jaraco.text import yield_lines
67
-
68
-
69
- # Turn on PEP440Warnings
70
- warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
71
-
72
- __all__ = [
73
- 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
74
- 'get_exe_prefixes',
75
- ]
76
-
77
-
78
- def is_64bit():
79
- return struct.calcsize("P") == 8
80
-
81
-
82
- def _to_bytes(s):
83
- return s.encode('utf8')
84
-
85
-
86
- def isascii(s):
87
- try:
88
- s.encode('ascii')
89
- return True
90
- except UnicodeError:
91
- return False
92
-
93
-
94
- def _one_liner(text):
95
- return textwrap.dedent(text).strip().replace('\n', '; ')
96
-
97
-
98
- class easy_install(Command):
99
- """Manage a download/build/install process"""
100
- description = "Find/get/install Python packages"
101
- command_consumes_arguments = True
102
-
103
- user_options = [
104
- ('prefix=', None, "installation prefix"),
105
- ("zip-ok", "z", "install package as a zipfile"),
106
- ("multi-version", "m", "make apps have to require() a version"),
107
- ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
108
- ("install-dir=", "d", "install package to DIR"),
109
- ("script-dir=", "s", "install scripts to DIR"),
110
- ("exclude-scripts", "x", "Don't install scripts"),
111
- ("always-copy", "a", "Copy all needed packages to install dir"),
112
- ("index-url=", "i", "base URL of Python Package Index"),
113
- ("find-links=", "f", "additional URL(s) to search for packages"),
114
- ("build-directory=", "b",
115
- "download/extract/build in DIR; keep the results"),
116
- ('optimize=', 'O',
117
- "also compile with optimization: -O1 for \"python -O\", "
118
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
119
- ('record=', None,
120
- "filename in which to record list of installed files"),
121
- ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
122
- ('site-dirs=', 'S', "list of directories where .pth files work"),
123
- ('editable', 'e', "Install specified packages in editable form"),
124
- ('no-deps', 'N', "don't install dependencies"),
125
- ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
126
- ('local-snapshots-ok', 'l',
127
- "allow building eggs from local checkouts"),
128
- ('version', None, "print version information and exit"),
129
- ('no-find-links', None,
130
- "Don't load find-links defined in packages being installed"),
131
- ('user', None, "install in user site-package '%s'" % site.USER_SITE)
132
- ]
133
- boolean_options = [
134
- 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
135
- 'editable',
136
- 'no-deps', 'local-snapshots-ok', 'version',
137
- 'user'
138
- ]
139
-
140
- negative_opt = {'always-unzip': 'zip-ok'}
141
- create_index = PackageIndex
142
-
143
- def initialize_options(self):
144
- warnings.warn(
145
- "easy_install command is deprecated. "
146
- "Use build and pip and other standards-based tools.",
147
- EasyInstallDeprecationWarning,
148
- )
149
-
150
- # the --user option seems to be an opt-in one,
151
- # so the default should be False.
152
- self.user = 0
153
- self.zip_ok = self.local_snapshots_ok = None
154
- self.install_dir = self.script_dir = self.exclude_scripts = None
155
- self.index_url = None
156
- self.find_links = None
157
- self.build_directory = None
158
- self.args = None
159
- self.optimize = self.record = None
160
- self.upgrade = self.always_copy = self.multi_version = None
161
- self.editable = self.no_deps = self.allow_hosts = None
162
- self.root = self.prefix = self.no_report = None
163
- self.version = None
164
- self.install_purelib = None # for pure module distributions
165
- self.install_platlib = None # non-pure (dists w/ extensions)
166
- self.install_headers = None # for C/C++ headers
167
- self.install_lib = None # set to either purelib or platlib
168
- self.install_scripts = None
169
- self.install_data = None
170
- self.install_base = None
171
- self.install_platbase = None
172
- self.install_userbase = site.USER_BASE
173
- self.install_usersite = site.USER_SITE
174
- self.no_find_links = None
175
-
176
- # Options not specifiable via command line
177
- self.package_index = None
178
- self.pth_file = self.always_copy_from = None
179
- self.site_dirs = None
180
- self.installed_projects = {}
181
- # Always read easy_install options, even if we are subclassed, or have
182
- # an independent instance created. This ensures that defaults will
183
- # always come from the standard configuration file(s)' "easy_install"
184
- # section, even if this is a "develop" or "install" command, or some
185
- # other embedding.
186
- self._dry_run = None
187
- self.verbose = self.distribution.verbose
188
- self.distribution._set_command_options(
189
- self, self.distribution.get_option_dict('easy_install')
190
- )
191
-
192
- def delete_blockers(self, blockers):
193
- extant_blockers = (
194
- filename for filename in blockers
195
- if os.path.exists(filename) or os.path.islink(filename)
196
- )
197
- list(map(self._delete_path, extant_blockers))
198
-
199
- def _delete_path(self, path):
200
- log.info("Deleting %s", path)
201
- if self.dry_run:
202
- return
203
-
204
- is_tree = os.path.isdir(path) and not os.path.islink(path)
205
- remover = rmtree if is_tree else os.unlink
206
- remover(path)
207
-
208
- @staticmethod
209
- def _render_version():
210
- """
211
- Render the Setuptools version and installation details, then exit.
212
- """
213
- ver = '{}.{}'.format(*sys.version_info)
214
- dist = get_distribution('setuptools')
215
- tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
216
- print(tmpl.format(**locals()))
217
- raise SystemExit()
218
-
219
- def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME
220
- self.version and self._render_version()
221
-
222
- py_version = sys.version.split()[0]
223
-
224
- self.config_vars = dict(sysconfig.get_config_vars())
225
-
226
- self.config_vars.update({
227
- 'dist_name': self.distribution.get_name(),
228
- 'dist_version': self.distribution.get_version(),
229
- 'dist_fullname': self.distribution.get_fullname(),
230
- 'py_version': py_version,
231
- 'py_version_short': f'{sys.version_info.major}.{sys.version_info.minor}',
232
- 'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}',
233
- 'sys_prefix': self.config_vars['prefix'],
234
- 'sys_exec_prefix': self.config_vars['exec_prefix'],
235
- # Only python 3.2+ has abiflags
236
- 'abiflags': getattr(sys, 'abiflags', ''),
237
- 'platlibdir': getattr(sys, 'platlibdir', 'lib'),
238
- })
239
- with contextlib.suppress(AttributeError):
240
- # only for distutils outside stdlib
241
- self.config_vars.update({
242
- 'implementation_lower': install._get_implementation().lower(),
243
- 'implementation': install._get_implementation(),
244
- })
245
-
246
- # pypa/distutils#113 Python 3.9 compat
247
- self.config_vars.setdefault(
248
- 'py_version_nodot_plat',
249
- getattr(sys, 'windir', '').replace('.', ''),
250
- )
251
-
252
- self.config_vars['userbase'] = self.install_userbase
253
- self.config_vars['usersite'] = self.install_usersite
254
- if self.user and not site.ENABLE_USER_SITE:
255
- log.warn("WARNING: The user site-packages directory is disabled.")
256
-
257
- self._fix_install_dir_for_user_site()
258
-
259
- self.expand_basedirs()
260
- self.expand_dirs()
261
-
262
- self._expand(
263
- 'install_dir', 'script_dir', 'build_directory',
264
- 'site_dirs',
265
- )
266
- # If a non-default installation directory was specified, default the
267
- # script directory to match it.
268
- if self.script_dir is None:
269
- self.script_dir = self.install_dir
270
-
271
- if self.no_find_links is None:
272
- self.no_find_links = False
273
-
274
- # Let install_dir get set by install_lib command, which in turn
275
- # gets its info from the install command, and takes into account
276
- # --prefix and --home and all that other crud.
277
- self.set_undefined_options(
278
- 'install_lib', ('install_dir', 'install_dir')
279
- )
280
- # Likewise, set default script_dir from 'install_scripts.install_dir'
281
- self.set_undefined_options(
282
- 'install_scripts', ('install_dir', 'script_dir')
283
- )
284
-
285
- if self.user and self.install_purelib:
286
- self.install_dir = self.install_purelib
287
- self.script_dir = self.install_scripts
288
- # default --record from the install command
289
- self.set_undefined_options('install', ('record', 'record'))
290
- self.all_site_dirs = get_site_dirs()
291
- self.all_site_dirs.extend(self._process_site_dirs(self.site_dirs))
292
-
293
- if not self.editable:
294
- self.check_site_dir()
295
- default_index = os.getenv("__EASYINSTALL_INDEX", "https://pypi.org/simple/")
296
- # ^ Private API for testing purposes only
297
- self.index_url = self.index_url or default_index
298
- self.shadow_path = self.all_site_dirs[:]
299
- for path_item in self.install_dir, normalize_path(self.script_dir):
300
- if path_item not in self.shadow_path:
301
- self.shadow_path.insert(0, path_item)
302
-
303
- if self.allow_hosts is not None:
304
- hosts = [s.strip() for s in self.allow_hosts.split(',')]
305
- else:
306
- hosts = ['*']
307
- if self.package_index is None:
308
- self.package_index = self.create_index(
309
- self.index_url, search_path=self.shadow_path, hosts=hosts,
310
- )
311
- self.local_index = Environment(self.shadow_path + sys.path)
312
-
313
- if self.find_links is not None:
314
- if isinstance(self.find_links, str):
315
- self.find_links = self.find_links.split()
316
- else:
317
- self.find_links = []
318
- if self.local_snapshots_ok:
319
- self.package_index.scan_egg_links(self.shadow_path + sys.path)
320
- if not self.no_find_links:
321
- self.package_index.add_find_links(self.find_links)
322
- self.set_undefined_options('install_lib', ('optimize', 'optimize'))
323
- self.optimize = self._validate_optimize(self.optimize)
324
-
325
- if self.editable and not self.build_directory:
326
- raise DistutilsArgError(
327
- "Must specify a build directory (-b) when using --editable"
328
- )
329
- if not self.args:
330
- raise DistutilsArgError(
331
- "No urls, filenames, or requirements specified (see --help)")
332
-
333
- self.outputs = []
334
-
335
- @staticmethod
336
- def _process_site_dirs(site_dirs):
337
- if site_dirs is None:
338
- return
339
-
340
- normpath = map(normalize_path, sys.path)
341
- site_dirs = [
342
- os.path.expanduser(s.strip()) for s in
343
- site_dirs.split(',')
344
- ]
345
- for d in site_dirs:
346
- if not os.path.isdir(d):
347
- log.warn("%s (in --site-dirs) does not exist", d)
348
- elif normalize_path(d) not in normpath:
349
- raise DistutilsOptionError(
350
- d + " (in --site-dirs) is not on sys.path"
351
- )
352
- else:
353
- yield normalize_path(d)
354
-
355
- @staticmethod
356
- def _validate_optimize(value):
357
- try:
358
- value = int(value)
359
- if value not in range(3):
360
- raise ValueError
361
- except ValueError as e:
362
- raise DistutilsOptionError(
363
- "--optimize must be 0, 1, or 2"
364
- ) from e
365
-
366
- return value
367
-
368
- def _fix_install_dir_for_user_site(self):
369
- """
370
- Fix the install_dir if "--user" was used.
371
- """
372
- if not self.user:
373
- return
374
-
375
- self.create_home_path()
376
- if self.install_userbase is None:
377
- msg = "User base directory is not specified"
378
- raise DistutilsPlatformError(msg)
379
- self.install_base = self.install_platbase = self.install_userbase
380
- scheme_name = f'{os.name}_user'
381
- self.select_scheme(scheme_name)
382
-
383
- def _expand_attrs(self, attrs):
384
- for attr in attrs:
385
- val = getattr(self, attr)
386
- if val is not None:
387
- if os.name == 'posix' or os.name == 'nt':
388
- val = os.path.expanduser(val)
389
- val = subst_vars(val, self.config_vars)
390
- setattr(self, attr, val)
391
-
392
- def expand_basedirs(self):
393
- """Calls `os.path.expanduser` on install_base, install_platbase and
394
- root."""
395
- self._expand_attrs(['install_base', 'install_platbase', 'root'])
396
-
397
- def expand_dirs(self):
398
- """Calls `os.path.expanduser` on install dirs."""
399
- dirs = [
400
- 'install_purelib',
401
- 'install_platlib',
402
- 'install_lib',
403
- 'install_headers',
404
- 'install_scripts',
405
- 'install_data',
406
- ]
407
- self._expand_attrs(dirs)
408
-
409
- def run(self, show_deprecation=True):
410
- if show_deprecation:
411
- self.announce(
412
- "WARNING: The easy_install command is deprecated "
413
- "and will be removed in a future version.",
414
- log.WARN,
415
- )
416
- if self.verbose != self.distribution.verbose:
417
- log.set_verbosity(self.verbose)
418
- try:
419
- for spec in self.args:
420
- self.easy_install(spec, not self.no_deps)
421
- if self.record:
422
- outputs = self.outputs
423
- if self.root: # strip any package prefix
424
- root_len = len(self.root)
425
- for counter in range(len(outputs)):
426
- outputs[counter] = outputs[counter][root_len:]
427
- from distutils import file_util
428
-
429
- self.execute(
430
- file_util.write_file, (self.record, outputs),
431
- "writing list of installed files to '%s'" %
432
- self.record
433
- )
434
- self.warn_deprecated_options()
435
- finally:
436
- log.set_verbosity(self.distribution.verbose)
437
-
438
- def pseudo_tempname(self):
439
- """Return a pseudo-tempname base in the install directory.
440
- This code is intentionally naive; if a malicious party can write to
441
- the target directory you're already in deep doodoo.
442
- """
443
- try:
444
- pid = os.getpid()
445
- except Exception:
446
- pid = random.randint(0, sys.maxsize)
447
- return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
448
-
449
- def warn_deprecated_options(self):
450
- pass
451
-
452
-    def check_site_dir(self):  # noqa: C901  # is too complex (12)  # FIXME
-        """Verify that self.install_dir is .pth-capable dir, if needed"""
-
-        instdir = normalize_path(self.install_dir)
-        pth_file = os.path.join(instdir, 'easy-install.pth')
-
-        if not os.path.exists(instdir):
-            try:
-                os.makedirs(instdir)
-            except (OSError, IOError):
-                self.cant_write_to_target()
-
-        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
-        is_site_dir = instdir in self.all_site_dirs
-
-        if not is_site_dir and not self.multi_version:
-            # No? Then directly test whether it does .pth file processing
-            is_site_dir = self.check_pth_processing()
-        else:
-            # make sure we can write to target dir
-            testfile = self.pseudo_tempname() + '.write-test'
-            test_exists = os.path.exists(testfile)
-            try:
-                if test_exists:
-                    os.unlink(testfile)
-                open(testfile, 'w').close()
-                os.unlink(testfile)
-            except (OSError, IOError):
-                self.cant_write_to_target()
-
-        if not is_site_dir and not self.multi_version:
-            # Can't install non-multi to non-site dir with easy_install
-            pythonpath = os.environ.get('PYTHONPATH', '')
-            log.warn(self.__no_default_msg, self.install_dir, pythonpath)
-
-        if is_site_dir:
-            if self.pth_file is None:
-                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
-        else:
-            self.pth_file = None
-
-        if self.multi_version and not os.path.exists(pth_file):
-            self.pth_file = None  # don't create a .pth file
-        self.install_dir = instdir
-
497
-    __cant_write_msg = textwrap.dedent("""
-        can't create or remove files in install directory
-
-        The following error occurred while trying to add or remove files in the
-        installation directory:
-
-            %s
-
-        The installation directory you specified (via --install-dir, --prefix, or
-        the distutils default setting) was:
-
-            %s
-        """).lstrip()  # noqa
-
-    __not_exists_id = textwrap.dedent("""
-        This directory does not currently exist. Please create it and try again, or
-        choose a different installation directory (using the -d or --install-dir
-        option).
-        """).lstrip()  # noqa
-
-    __access_msg = textwrap.dedent("""
-        Perhaps your account does not have write access to this directory? If the
-        installation directory is a system-owned directory, you may need to sign in
-        as the administrator or "root" account. If you do not have administrative
-        access to this machine, you may wish to choose a different installation
-        directory, preferably one that is listed in your PYTHONPATH environment
-        variable.
-
-        For information on other options, you may wish to consult the
-        documentation at:
-
-          https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
-
-        Please make the appropriate changes for your system and try again.
-        """).lstrip()  # noqa
-
-    def cant_write_to_target(self):
-        msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
-
-        if not os.path.exists(self.install_dir):
-            msg += '\n' + self.__not_exists_id
-        else:
-            msg += '\n' + self.__access_msg
-        raise DistutilsError(msg)
-
542
-    def check_pth_processing(self):
-        """Empirically verify whether .pth files are supported in inst. dir"""
-        instdir = self.install_dir
-        log.info("Checking .pth file support in %s", instdir)
-        pth_file = self.pseudo_tempname() + ".pth"
-        ok_file = pth_file + '.ok'
-        ok_exists = os.path.exists(ok_file)
-        tmpl = _one_liner("""
-            import os
-            f = open({ok_file!r}, 'w')
-            f.write('OK')
-            f.close()
-            """) + '\n'
-        try:
-            if ok_exists:
-                os.unlink(ok_file)
-            dirname = os.path.dirname(ok_file)
-            os.makedirs(dirname, exist_ok=True)
-            f = open(pth_file, 'w')
-        except (OSError, IOError):
-            self.cant_write_to_target()
-        else:
-            try:
-                f.write(tmpl.format(**locals()))
-                f.close()
-                f = None
-                executable = sys.executable
-                if os.name == 'nt':
-                    dirname, basename = os.path.split(executable)
-                    alt = os.path.join(dirname, 'pythonw.exe')
-                    use_alt = (
-                        basename.lower() == 'python.exe' and
-                        os.path.exists(alt)
-                    )
-                    if use_alt:
-                        # use pythonw.exe to avoid opening a console window
-                        executable = alt
-
-                from distutils.spawn import spawn
-
-                spawn([executable, '-E', '-c', 'pass'], 0)
-
-                if os.path.exists(ok_file):
-                    log.info(
-                        "TEST PASSED: %s appears to support .pth files",
-                        instdir
-                    )
-                    return True
-            finally:
-                if f:
-                    f.close()
-                if os.path.exists(ok_file):
-                    os.unlink(ok_file)
-                if os.path.exists(pth_file):
-                    os.unlink(pth_file)
-        if not self.multi_version:
-            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
-        return False
-
601
-    def install_egg_scripts(self, dist):
-        """Write all the scripts for `dist`, unless scripts are excluded"""
-        if not self.exclude_scripts and dist.metadata_isdir('scripts'):
-            for script_name in dist.metadata_listdir('scripts'):
-                if dist.metadata_isdir('scripts/' + script_name):
-                    # The "script" is a directory, likely a Python 3
-                    # __pycache__ directory, so skip it.
-                    continue
-                self.install_script(
-                    dist, script_name,
-                    dist.get_metadata('scripts/' + script_name)
-                )
-        self.install_wrapper_scripts(dist)
-
-    def add_output(self, path):
-        if os.path.isdir(path):
-            for base, dirs, files in os.walk(path):
-                for filename in files:
-                    self.outputs.append(os.path.join(base, filename))
-        else:
-            self.outputs.append(path)
-
-    def not_editable(self, spec):
-        if self.editable:
-            raise DistutilsArgError(
-                "Invalid argument %r: you can't use filenames or URLs "
-                "with --editable (except via the --find-links option)."
-                % (spec,)
-            )
-
-    def check_editable(self, spec):
-        if not self.editable:
-            return
-
-        if os.path.exists(os.path.join(self.build_directory, spec.key)):
-            raise DistutilsArgError(
-                "%r already exists in %s; can't do a checkout there" %
-                (spec.key, self.build_directory)
-            )
-
-    @contextlib.contextmanager
-    def _tmpdir(self):
-        tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
-        try:
-            # cast to str as workaround for #709 and #710 and #712
-            yield str(tmpdir)
-        finally:
-            os.path.exists(tmpdir) and rmtree(tmpdir)
-
650
-    def easy_install(self, spec, deps=False):
-        with self._tmpdir() as tmpdir:
-            if not isinstance(spec, Requirement):
-                if URL_SCHEME(spec):
-                    # It's a url, download it to tmpdir and process
-                    self.not_editable(spec)
-                    dl = self.package_index.download(spec, tmpdir)
-                    return self.install_item(None, dl, tmpdir, deps, True)
-
-                elif os.path.exists(spec):
-                    # Existing file or directory, just process it directly
-                    self.not_editable(spec)
-                    return self.install_item(None, spec, tmpdir, deps, True)
-                else:
-                    spec = parse_requirement_arg(spec)
-
-            self.check_editable(spec)
-            dist = self.package_index.fetch_distribution(
-                spec, tmpdir, self.upgrade, self.editable,
-                not self.always_copy, self.local_index
-            )
-            if dist is None:
-                msg = "Could not find suitable distribution for %r" % spec
-                if self.always_copy:
-                    msg += " (--always-copy skips system and development eggs)"
-                raise DistutilsError(msg)
-            elif dist.precedence == DEVELOP_DIST:
-                # .egg-info dists don't need installing, just process deps
-                self.process_distribution(spec, dist, deps, "Using")
-                return dist
-            else:
-                return self.install_item(spec, dist.location, tmpdir, deps)
-
683
-    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
-
-        # Installation is also needed if file in tmpdir or is not an egg
-        install_needed = install_needed or self.always_copy
-        install_needed = install_needed or os.path.dirname(download) == tmpdir
-        install_needed = install_needed or not download.endswith('.egg')
-        install_needed = install_needed or (
-            self.always_copy_from is not None and
-            os.path.dirname(normalize_path(download)) ==
-            normalize_path(self.always_copy_from)
-        )
-
-        if spec and not install_needed:
-            # at this point, we know it's a local .egg, we just don't know if
-            # it's already installed.
-            for dist in self.local_index[spec.project_name]:
-                if dist.location == download:
-                    break
-            else:
-                install_needed = True  # it's not in the local index
-
-        log.info("Processing %s", os.path.basename(download))
-
-        if install_needed:
-            dists = self.install_eggs(spec, download, tmpdir)
-            for dist in dists:
-                self.process_distribution(spec, dist, deps)
-        else:
-            dists = [self.egg_distribution(download)]
-            self.process_distribution(spec, dists[0], deps, "Using")
-
-        if spec is not None:
-            for dist in dists:
-                if dist in spec:
-                    return dist
-
-    def select_scheme(self, name):
-        try:
-            install._select_scheme(self, name)
-        except AttributeError:
-            # stdlib distutils
-            install.install.select_scheme(self, name.replace('posix', 'unix'))
-
726
-    # FIXME: 'easy_install.process_distribution' is too complex (12)
-    def process_distribution(  # noqa: C901
-        self, requirement, dist, deps=True, *info,
-    ):
-        self.update_pth(dist)
-        self.package_index.add(dist)
-        if dist in self.local_index[dist.key]:
-            self.local_index.remove(dist)
-        self.local_index.add(dist)
-        self.install_egg_scripts(dist)
-        self.installed_projects[dist.key] = dist
-        log.info(self.installation_report(requirement, dist, *info))
-        if (dist.has_metadata('dependency_links.txt') and
-                not self.no_find_links):
-            self.package_index.add_find_links(
-                dist.get_metadata_lines('dependency_links.txt')
-            )
-        if not deps and not self.always_copy:
-            return
-        elif requirement is not None and dist.key != requirement.key:
-            log.warn("Skipping dependencies for %s", dist)
-            return  # XXX this is not the distribution we were looking for
-        elif requirement is None or dist not in requirement:
-            # if we wound up with a different version, resolve what we've got
-            distreq = dist.as_requirement()
-            requirement = Requirement(str(distreq))
-        log.info("Processing dependencies for %s", requirement)
-        try:
-            distros = WorkingSet([]).resolve(
-                [requirement], self.local_index, self.easy_install
-            )
-        except DistributionNotFound as e:
-            raise DistutilsError(str(e)) from e
-        except VersionConflict as e:
-            raise DistutilsError(e.report()) from e
-        if self.always_copy or self.always_copy_from:
-            # Force all the relevant distros to be copied or activated
-            for dist in distros:
-                if dist.key not in self.installed_projects:
-                    self.easy_install(dist.as_requirement())
-        log.info("Finished processing dependencies for %s", requirement)
-
768
-    def should_unzip(self, dist):
-        if self.zip_ok is not None:
-            return not self.zip_ok
-        if dist.has_metadata('not-zip-safe'):
-            return True
-        if not dist.has_metadata('zip-safe'):
-            return True
-        return False
-
-    def maybe_move(self, spec, dist_filename, setup_base):
-        dst = os.path.join(self.build_directory, spec.key)
-        if os.path.exists(dst):
-            msg = (
-                "%r already exists in %s; build directory %s will not be kept"
-            )
-            log.warn(msg, spec.key, self.build_directory, setup_base)
-            return setup_base
-        if os.path.isdir(dist_filename):
-            setup_base = dist_filename
-        else:
-            if os.path.dirname(dist_filename) == setup_base:
-                os.unlink(dist_filename)  # get it out of the tmp dir
-            contents = os.listdir(setup_base)
-            if len(contents) == 1:
-                dist_filename = os.path.join(setup_base, contents[0])
-                if os.path.isdir(dist_filename):
-                    # if the only thing there is a directory, move it instead
-                    setup_base = dist_filename
-        ensure_directory(dst)
-        shutil.move(setup_base, dst)
-        return dst
-
800
-    def install_wrapper_scripts(self, dist):
-        if self.exclude_scripts:
-            return
-        for args in ScriptWriter.best().get_args(dist):
-            self.write_script(*args)
-
-    def install_script(self, dist, script_name, script_text, dev_path=None):
-        """Generate a legacy script wrapper and install it"""
-        spec = str(dist.as_requirement())
-        is_script = is_python_script(script_text, script_name)
-
-        if is_script:
-            body = self._load_template(dev_path) % locals()
-            script_text = ScriptWriter.get_header(script_text) + body
-        self.write_script(script_name, _to_bytes(script_text), 'b')
-
-    @staticmethod
-    def _load_template(dev_path):
-        """
-        There are a couple of template scripts in the package. This
-        function loads one of them and prepares it for use.
-        """
-        # See https://github.com/pypa/setuptools/issues/134 for info
-        # on script file naming and downstream issues with SVR4
-        name = 'script.tmpl'
-        if dev_path:
-            name = name.replace('.tmpl', ' (dev).tmpl')
-
-        raw_bytes = resource_string('setuptools', name)
-        return raw_bytes.decode('utf-8')
-
-    def write_script(self, script_name, contents, mode="t", blockers=()):
-        """Write an executable file to the scripts directory"""
-        self.delete_blockers(  # clean up old .py/.pyw w/o a script
-            [os.path.join(self.script_dir, x) for x in blockers]
-        )
-        log.info("Installing %s script to %s", script_name, self.script_dir)
-        target = os.path.join(self.script_dir, script_name)
-        self.add_output(target)
-
-        if self.dry_run:
-            return
-
-        mask = current_umask()
-        ensure_directory(target)
-        if os.path.exists(target):
-            os.unlink(target)
-        with open(target, "w" + mode) as f:
-            f.write(contents)
-        chmod(target, 0o777 - mask)
-
851
-    def install_eggs(self, spec, dist_filename, tmpdir):
-        # .egg dirs or files are already built, so just return them
-        installer_map = {
-            '.egg': self.install_egg,
-            '.exe': self.install_exe,
-            '.whl': self.install_wheel,
-        }
-        try:
-            install_dist = installer_map[
-                dist_filename.lower()[-4:]
-            ]
-        except KeyError:
-            pass
-        else:
-            return [install_dist(dist_filename, tmpdir)]
-
-        # Anything else, try to extract and build
-        setup_base = tmpdir
-        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
-            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
-        elif os.path.isdir(dist_filename):
-            setup_base = os.path.abspath(dist_filename)
-
-        if (setup_base.startswith(tmpdir)  # something we downloaded
-                and self.build_directory and spec is not None):
-            setup_base = self.maybe_move(spec, dist_filename, setup_base)
-
-        # Find the setup.py file
-        setup_script = os.path.join(setup_base, 'setup.py')
-
-        if not os.path.exists(setup_script):
-            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
-            if not setups:
-                raise DistutilsError(
-                    "Couldn't find a setup script in %s" %
-                    os.path.abspath(dist_filename)
-                )
-            if len(setups) > 1:
-                raise DistutilsError(
-                    "Multiple setup scripts in %s" %
-                    os.path.abspath(dist_filename)
-                )
-            setup_script = setups[0]
-
-        # Now run it, and return the result
-        if self.editable:
-            log.info(self.report_editable(spec, setup_script))
-            return []
-        else:
-            return self.build_and_install(setup_script, setup_base)
-
902
- def egg_distribution(self, egg_path):
903
- if os.path.isdir(egg_path):
904
- metadata = PathMetadata(egg_path, os.path.join(egg_path,
905
- 'EGG-INFO'))
906
- else:
907
- metadata = EggMetadata(zipimport.zipimporter(egg_path))
908
- return Distribution.from_filename(egg_path, metadata=metadata)
909
-
910
- # FIXME: 'easy_install.install_egg' is too complex (11)
911
- def install_egg(self, egg_path, tmpdir): # noqa: C901
912
- destination = os.path.join(
913
- self.install_dir,
914
- os.path.basename(egg_path),
915
- )
916
- destination = os.path.abspath(destination)
917
- if not self.dry_run:
918
- ensure_directory(destination)
919
-
920
- dist = self.egg_distribution(egg_path)
921
- if not (
922
- os.path.exists(destination) and os.path.samefile(egg_path, destination)
923
- ):
924
- if os.path.isdir(destination) and not os.path.islink(destination):
925
- dir_util.remove_tree(destination, dry_run=self.dry_run)
926
- elif os.path.exists(destination):
927
- self.execute(
928
- os.unlink,
929
- (destination,),
930
- "Removing " + destination,
931
- )
932
- try:
933
- new_dist_is_zipped = False
934
- if os.path.isdir(egg_path):
935
- if egg_path.startswith(tmpdir):
936
- f, m = shutil.move, "Moving"
937
- else:
938
- f, m = shutil.copytree, "Copying"
939
- elif self.should_unzip(dist):
940
- self.mkpath(destination)
941
- f, m = self.unpack_and_compile, "Extracting"
942
- else:
943
- new_dist_is_zipped = True
944
- if egg_path.startswith(tmpdir):
945
- f, m = shutil.move, "Moving"
946
- else:
947
- f, m = shutil.copy2, "Copying"
948
- self.execute(
949
- f,
950
- (egg_path, destination),
951
- (m + " %s to %s") % (
952
- os.path.basename(egg_path),
953
- os.path.dirname(destination)
954
- ),
955
- )
956
- update_dist_caches(
957
- destination,
958
- fix_zipimporter_caches=new_dist_is_zipped,
959
- )
960
- except Exception:
961
- update_dist_caches(destination, fix_zipimporter_caches=False)
962
- raise
963
-
964
- self.add_output(destination)
965
- return self.egg_distribution(destination)
966
-
967
- def install_exe(self, dist_filename, tmpdir):
968
- # See if it's valid, get data
969
- cfg = extract_wininst_cfg(dist_filename)
970
- if cfg is None:
971
- raise DistutilsError(
972
- "%s is not a valid distutils Windows .exe" % dist_filename
973
- )
974
- # Create a dummy distribution object until we build the real distro
975
- dist = Distribution(
976
- None,
977
- project_name=cfg.get('metadata', 'name'),
978
- version=cfg.get('metadata', 'version'), platform=get_platform(),
979
- )
980
-
981
- # Convert the .exe to an unpacked egg
982
- egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
983
- dist.location = egg_path
984
- egg_tmp = egg_path + '.tmp'
985
- _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
986
- pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
987
- ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
988
- dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
989
- self.exe_to_egg(dist_filename, egg_tmp)
990
-
991
- # Write EGG-INFO/PKG-INFO
992
- if not os.path.exists(pkg_inf):
993
- f = open(pkg_inf, 'w')
994
- f.write('Metadata-Version: 1.0\n')
995
- for k, v in cfg.items('metadata'):
996
- if k != 'target_version':
997
- f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
998
- f.close()
999
- script_dir = os.path.join(_egg_info, 'scripts')
1000
- # delete entry-point scripts to avoid duping
1001
- self.delete_blockers([
1002
- os.path.join(script_dir, args[0])
1003
- for args in ScriptWriter.get_args(dist)
1004
- ])
1005
- # Build .egg file from tmpdir
1006
- bdist_egg.make_zipfile(
1007
- egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
1008
- )
1009
- # install the .egg
1010
- return self.install_egg(egg_path, tmpdir)
1011
-
1012
- # FIXME: 'easy_install.exe_to_egg' is too complex (12)
1013
- def exe_to_egg(self, dist_filename, egg_tmp): # noqa: C901
1014
- """Extract a bdist_wininst to the directories an egg would use"""
1015
- # Check for .pth file and set up prefix translations
1016
- prefixes = get_exe_prefixes(dist_filename)
1017
- to_compile = []
1018
- native_libs = []
1019
- top_level = {}
1020
-
1021
- def process(src, dst):
1022
- s = src.lower()
1023
- for old, new in prefixes:
1024
- if s.startswith(old):
1025
- src = new + src[len(old):]
1026
- parts = src.split('/')
1027
- dst = os.path.join(egg_tmp, *parts)
1028
- dl = dst.lower()
1029
- if dl.endswith('.pyd') or dl.endswith('.dll'):
1030
- parts[-1] = bdist_egg.strip_module(parts[-1])
1031
- top_level[os.path.splitext(parts[0])[0]] = 1
1032
- native_libs.append(src)
1033
- elif dl.endswith('.py') and old != 'SCRIPTS/':
1034
- top_level[os.path.splitext(parts[0])[0]] = 1
1035
- to_compile.append(dst)
1036
- return dst
1037
- if not src.endswith('.pth'):
1038
- log.warn("WARNING: can't process %s", src)
1039
- return None
1040
-
1041
- # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
1042
- unpack_archive(dist_filename, egg_tmp, process)
1043
- stubs = []
1044
- for res in native_libs:
1045
- if res.lower().endswith('.pyd'): # create stubs for .pyd's
1046
- parts = res.split('/')
1047
- resource = parts[-1]
1048
- parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
1049
- pyfile = os.path.join(egg_tmp, *parts)
1050
- to_compile.append(pyfile)
1051
- stubs.append(pyfile)
1052
- bdist_egg.write_stub(resource, pyfile)
1053
- self.byte_compile(to_compile) # compile .py's
1054
- bdist_egg.write_safety_flag(
1055
- os.path.join(egg_tmp, 'EGG-INFO'),
1056
- bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
1057
-
1058
- for name in 'top_level', 'native_libs':
1059
- if locals()[name]:
1060
- txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
1061
- if not os.path.exists(txt):
1062
- f = open(txt, 'w')
1063
- f.write('\n'.join(locals()[name]) + '\n')
1064
- f.close()
1065
-
1066
- def install_wheel(self, wheel_path, tmpdir):
1067
- wheel = Wheel(wheel_path)
1068
- assert wheel.is_compatible()
1069
- destination = os.path.join(self.install_dir, wheel.egg_name())
1070
- destination = os.path.abspath(destination)
1071
- if not self.dry_run:
1072
- ensure_directory(destination)
1073
- if os.path.isdir(destination) and not os.path.islink(destination):
1074
- dir_util.remove_tree(destination, dry_run=self.dry_run)
1075
- elif os.path.exists(destination):
1076
- self.execute(
1077
- os.unlink,
1078
- (destination,),
1079
- "Removing " + destination,
1080
- )
1081
- try:
1082
- self.execute(
1083
- wheel.install_as_egg,
1084
- (destination,),
1085
- ("Installing %s to %s") % (
1086
- os.path.basename(wheel_path),
1087
- os.path.dirname(destination)
1088
- ),
1089
- )
1090
- finally:
1091
- update_dist_caches(destination, fix_zipimporter_caches=False)
1092
- self.add_output(destination)
1093
- return self.egg_distribution(destination)
1094
-
1095
- __mv_warning = textwrap.dedent("""
1096
- Because this distribution was installed --multi-version, before you can
1097
- import modules from this package in an application, you will need to
1098
- 'import pkg_resources' and then use a 'require()' call similar to one of
1099
- these examples, in order to select the desired version:
1100
-
1101
- pkg_resources.require("%(name)s") # latest installed version
1102
- pkg_resources.require("%(name)s==%(version)s") # this exact version
1103
- pkg_resources.require("%(name)s>=%(version)s") # this version or higher
1104
- """).lstrip() # noqa
1105
-
1106
- __id_warning = textwrap.dedent("""
1107
- Note also that the installation directory must be on sys.path at runtime for
1108
- this to work. (e.g. by being the application's script directory, by being on
1109
- PYTHONPATH, or by being added to sys.path by your code.)
1110
- """) # noqa
1111
-
1112
- def installation_report(self, req, dist, what="Installed"):
1113
- """Helpful installation message for display to package users"""
1114
- msg = "\n%(what)s %(eggloc)s%(extras)s"
1115
- if self.multi_version and not self.no_report:
1116
- msg += '\n' + self.__mv_warning
1117
- if self.install_dir not in map(normalize_path, sys.path):
1118
- msg += '\n' + self.__id_warning
1119
-
1120
- eggloc = dist.location
1121
- name = dist.project_name
1122
- version = dist.version
1123
- extras = '' # TODO: self.report_extras(req, dist)
1124
- return msg % locals()
1125
-
1126
- __editable_msg = textwrap.dedent("""
1127
- Extracted editable version of %(spec)s to %(dirname)s
1128
-
1129
- If it uses setuptools in its setup script, you can activate it in
1130
- "development" mode by going to that directory and running::
1131
-
1132
- %(python)s setup.py develop
1133
-
1134
- See the setuptools documentation for the "develop" command for more info.
1135
- """).lstrip() # noqa
1136
-
1137
- def report_editable(self, spec, setup_script):
1138
- dirname = os.path.dirname(setup_script)
1139
- python = sys.executable
1140
- return '\n' + self.__editable_msg % locals()
1141
-
1142
- def run_setup(self, setup_script, setup_base, args):
1143
- sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
1144
- sys.modules.setdefault('distutils.command.egg_info', egg_info)
1145
-
1146
- args = list(args)
1147
- if self.verbose > 2:
1148
- v = 'v' * (self.verbose - 1)
1149
- args.insert(0, '-' + v)
1150
- elif self.verbose < 2:
1151
- args.insert(0, '-q')
1152
- if self.dry_run:
1153
- args.insert(0, '-n')
1154
- log.info(
1155
- "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
1156
- )
1157
- try:
1158
- run_setup(setup_script, args)
1159
- except SystemExit as v:
1160
- raise DistutilsError(
1161
- "Setup script exited with %s" % (v.args[0],)
1162
- ) from v
1163
-
1164
- def build_and_install(self, setup_script, setup_base):
1165
- args = ['bdist_egg', '--dist-dir']
1166
-
1167
- dist_dir = tempfile.mkdtemp(
1168
- prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
1169
- )
1170
- try:
1171
- self._set_fetcher_options(os.path.dirname(setup_script))
1172
- args.append(dist_dir)
1173
-
1174
- self.run_setup(setup_script, setup_base, args)
1175
- all_eggs = Environment([dist_dir])
1176
- eggs = []
1177
- for key in all_eggs:
1178
- for dist in all_eggs[key]:
1179
- eggs.append(self.install_egg(dist.location, setup_base))
1180
- if not eggs and not self.dry_run:
1181
- log.warn("No eggs found in %s (setup script problem?)",
1182
- dist_dir)
1183
- return eggs
1184
- finally:
1185
- rmtree(dist_dir)
1186
- log.set_verbosity(self.verbose) # restore our log verbosity
1187
-
1188
- def _set_fetcher_options(self, base):
1189
- """
1190
- When easy_install is about to run bdist_egg on a source dist, that
1191
- source dist might have 'setup_requires' directives, requiring
1192
- additional fetching. Ensure the fetcher options given to easy_install
1193
- are available to that command as well.
1194
- """
1195
- # find the fetch options from easy_install and write them out
1196
- # to the setup.cfg file.
1197
- ei_opts = self.distribution.get_option_dict('easy_install').copy()
1198
- fetch_directives = (
1199
- 'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
1200
- )
1201
- fetch_options = {}
1202
- for key, val in ei_opts.items():
1203
- if key not in fetch_directives:
1204
- continue
1205
- fetch_options[key] = val[1]
1206
- # create a settings dictionary suitable for `edit_config`
1207
- settings = dict(easy_install=fetch_options)
1208
- cfg_filename = os.path.join(base, 'setup.cfg')
1209
- setopt.edit_config(cfg_filename, settings)
1210
-
1211
- def update_pth(self, dist): # noqa: C901 # is too complex (11) # FIXME
1212
- if self.pth_file is None:
1213
- return
1214
-
1215
- for d in self.pth_file[dist.key]: # drop old entries
1216
- if not self.multi_version and d.location == dist.location:
1217
- continue
1218
-
1219
- log.info("Removing %s from easy-install.pth file", d)
1220
- self.pth_file.remove(d)
1221
- if d.location in self.shadow_path:
1222
- self.shadow_path.remove(d.location)
1223
-
1224
- if not self.multi_version:
1225
- if dist.location in self.pth_file.paths:
1226
- log.info(
1227
- "%s is already the active version in easy-install.pth",
1228
- dist,
1229
- )
1230
- else:
1231
- log.info("Adding %s to easy-install.pth file", dist)
1232
- self.pth_file.add(dist) # add new entry
1233
- if dist.location not in self.shadow_path:
1234
- self.shadow_path.append(dist.location)
1235
-
1236
- if self.dry_run:
1237
- return
1238
-
1239
- self.pth_file.save()
1240
-
1241
- if dist.key != 'setuptools':
1242
- return
1243
-
1244
- # Ensure that setuptools itself never becomes unavailable!
1245
- # XXX should this check for latest version?
1246
- filename = os.path.join(self.install_dir, 'setuptools.pth')
1247
- if os.path.islink(filename):
1248
- os.unlink(filename)
1249
- with open(filename, 'wt') as f:
1250
- f.write(self.pth_file.make_relative(dist.location) + '\n')
1251
-
1252
- def unpack_progress(self, src, dst):
1253
- # Progress filter for unpacking
1254
- log.debug("Unpacking %s to %s", src, dst)
1255
- return dst # only unpack-and-compile skips files for dry run
1256
-
1257
- def unpack_and_compile(self, egg_path, destination):
1258
- to_compile = []
1259
- to_chmod = []
1260
-
1261
- def pf(src, dst):
1262
- if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
1263
- to_compile.append(dst)
1264
- elif dst.endswith('.dll') or dst.endswith('.so'):
1265
- to_chmod.append(dst)
1266
- self.unpack_progress(src, dst)
1267
- return not self.dry_run and dst or None
1268
-
1269
- unpack_archive(egg_path, destination, pf)
1270
- self.byte_compile(to_compile)
1271
- if not self.dry_run:
1272
- for f in to_chmod:
1273
- mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
1274
- chmod(f, mode)
1275
-
1276
- def byte_compile(self, to_compile):
1277
- if sys.dont_write_bytecode:
1278
- return
1279
-
1280
- from distutils.util import byte_compile
1281
-
1282
- try:
1283
- # try to make the byte compile messages quieter
1284
- log.set_verbosity(self.verbose - 1)
1285
-
1286
- byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
1287
- if self.optimize:
1288
- byte_compile(
1289
- to_compile, optimize=self.optimize, force=1,
1290
- dry_run=self.dry_run,
1291
- )
1292
- finally:
1293
- log.set_verbosity(self.verbose) # restore original verbosity
1294
-
1295
- __no_default_msg = textwrap.dedent("""
1296
- bad install directory or PYTHONPATH
1297
-
1298
- You are attempting to install a package to a directory that is not
1299
- on PYTHONPATH and which Python does not read ".pth" files from. The
1300
- installation directory you specified (via --install-dir, --prefix, or
1301
- the distutils default setting) was:
1302
-
1303
- %s
1304
-
1305
- and your PYTHONPATH environment variable currently contains:
1306
-
1307
- %r
1308
-
1309
- Here are some of your options for correcting the problem:
1310
-
1311
- * You can choose a different installation directory, i.e., one that is
1312
- on PYTHONPATH or supports .pth files
1313
-
1314
- * You can add the installation directory to the PYTHONPATH environment
1315
- variable. (It must then also be on PYTHONPATH whenever you run
1316
- Python and want to use the package(s) you are installing.)
1317
-
1318
- * You can set up the installation directory to support ".pth" files by
1319
- using one of the approaches described here:
1320
-
1321
- https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations
1322
-
1323
-
1324
- Please make the appropriate changes for your system and try again.
1325
- """).strip()
1326
-
1327
- def create_home_path(self):
1328
- """Create directories under ~."""
1329
- if not self.user:
1330
- return
1331
- home = convert_path(os.path.expanduser("~"))
1332
- for path in only_strs(self.config_vars.values()):
1333
- if path.startswith(home) and not os.path.isdir(path):
1334
- self.debug_print("os.makedirs('%s', 0o700)" % path)
1335
- os.makedirs(path, 0o700)
1336
-
1337
- INSTALL_SCHEMES = dict(
1338
- posix=dict(
1339
- install_dir='$base/lib/python$py_version_short/site-packages',
1340
- script_dir='$base/bin',
1341
- ),
1342
- )
1343
-
1344
- DEFAULT_SCHEME = dict(
1345
- install_dir='$base/Lib/site-packages',
1346
- script_dir='$base/Scripts',
1347
- )
1348
-
1349
- def _expand(self, *attrs):
1350
- config_vars = self.get_finalized_command('install').config_vars
1351
-
1352
- if self.prefix:
1353
- # Set default install_dir/scripts from --prefix
1354
- config_vars = dict(config_vars)
1355
- config_vars['base'] = self.prefix
1356
- scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
1357
- for attr, val in scheme.items():
1358
- if getattr(self, attr, None) is None:
1359
- setattr(self, attr, val)
1360
-
1361
- from distutils.util import subst_vars
1362
-
1363
- for attr in attrs:
1364
- val = getattr(self, attr)
1365
- if val is not None:
1366
- val = subst_vars(val, config_vars)
1367
- if os.name == 'posix':
1368
- val = os.path.expanduser(val)
1369
- setattr(self, attr, val)
1370
-
1371
-
1372
-def _pythonpath():
-    items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
-    return filter(None, items)
-
1376
-
1377
- def get_site_dirs():
1378
- """
1379
- Return a list of 'site' dirs
1380
- """
1381
-
1382
- sitedirs = []
1383
-
1384
- # start with PYTHONPATH
1385
- sitedirs.extend(_pythonpath())
1386
-
1387
- prefixes = [sys.prefix]
1388
- if sys.exec_prefix != sys.prefix:
1389
- prefixes.append(sys.exec_prefix)
1390
- for prefix in prefixes:
1391
- if not prefix:
1392
- continue
1393
-
1394
- if sys.platform in ('os2emx', 'riscos'):
1395
- sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
1396
- elif os.sep == '/':
1397
- sitedirs.extend([
1398
- os.path.join(
1399
- prefix,
1400
- "lib",
1401
- "python{}.{}".format(*sys.version_info),
1402
- "site-packages",
1403
- ),
1404
- os.path.join(prefix, "lib", "site-python"),
1405
- ])
1406
- else:
1407
- sitedirs.extend([
1408
- prefix,
1409
- os.path.join(prefix, "lib", "site-packages"),
1410
- ])
1411
- if sys.platform != 'darwin':
1412
- continue
1413
-
1414
- # for framework builds *only* we add the standard Apple
1415
- # locations. Currently only per-user, but /Library and
1416
- # /Network/Library could be added too
1417
- if 'Python.framework' not in prefix:
1418
- continue
1419
-
1420
- home = os.environ.get('HOME')
1421
- if not home:
1422
- continue
1423
-
1424
- home_sp = os.path.join(
1425
- home,
1426
- 'Library',
1427
- 'Python',
1428
- '{}.{}'.format(*sys.version_info),
1429
- 'site-packages',
1430
- )
1431
- sitedirs.append(home_sp)
1432
- lib_paths = get_path('purelib'), get_path('platlib')
1433
-
1434
- sitedirs.extend(s for s in lib_paths if s not in sitedirs)
1435
-
1436
- if site.ENABLE_USER_SITE:
1437
- sitedirs.append(site.USER_SITE)
1438
-
1439
- with contextlib.suppress(AttributeError):
1440
- sitedirs.extend(site.getsitepackages())
1441
-
1442
- sitedirs = list(map(normalize_path, sitedirs))
1443
-
1444
- return sitedirs
1445
-
1446
-
1447
- def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME
1448
- """Yield sys.path directories that might contain "old-style" packages"""
1449
-
1450
- seen = {}
1451
-
1452
- for dirname in inputs:
1453
- dirname = normalize_path(dirname)
1454
- if dirname in seen:
1455
- continue
1456
-
1457
- seen[dirname] = 1
1458
- if not os.path.isdir(dirname):
1459
- continue
1460
-
1461
- files = os.listdir(dirname)
1462
- yield dirname, files
1463
-
1464
- for name in files:
1465
- if not name.endswith('.pth'):
1466
- # We only care about the .pth files
1467
- continue
1468
- if name in ('easy-install.pth', 'setuptools.pth'):
1469
- # Ignore .pth files that we control
1470
- continue
1471
-
1472
- # Read the .pth file
1473
- f = open(os.path.join(dirname, name))
1474
- lines = list(yield_lines(f))
1475
- f.close()
1476
-
1477
- # Yield existing non-dupe, non-import directory lines from it
1478
- for line in lines:
1479
- if line.startswith("import"):
1480
- continue
1481
-
1482
- line = normalize_path(line.rstrip())
1483
- if line in seen:
1484
- continue
1485
-
1486
- seen[line] = 1
1487
- if not os.path.isdir(line):
1488
- continue
1489
-
1490
- yield line, os.listdir(line)
1491
-
1492
-
1493
- def extract_wininst_cfg(dist_filename):
1494
- """Extract configuration data from a bdist_wininst .exe
1495
-
1496
- Returns a configparser.RawConfigParser, or None
1497
- """
1498
- f = open(dist_filename, 'rb')
1499
- try:
1500
- endrec = zipfile._EndRecData(f)
1501
- if endrec is None:
1502
- return None
1503
-
1504
- prepended = (endrec[9] - endrec[5]) - endrec[6]
1505
- if prepended < 12: # no wininst data here
1506
- return None
1507
- f.seek(prepended - 12)
1508
-
1509
- tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
1510
- if tag not in (0x1234567A, 0x1234567B):
1511
- return None # not a valid tag
1512
-
1513
- f.seek(prepended - (12 + cfglen))
1514
- init = {'version': '', 'target_version': ''}
1515
- cfg = configparser.RawConfigParser(init)
1516
- try:
1517
- part = f.read(cfglen)
1518
- # Read up to the first null byte.
1519
- config = part.split(b'\0', 1)[0]
1520
- # Now the config is in bytes, but for RawConfigParser, it should
1521
- # be text, so decode it.
1522
- config = config.decode(sys.getfilesystemencoding())
1523
- cfg.read_file(io.StringIO(config))
1524
- except configparser.Error:
1525
- return None
1526
- if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
1527
- return None
1528
- return cfg
1529
-
1530
- finally:
1531
- f.close()
1532
-
1533
-
1534
- def get_exe_prefixes(exe_filename):
1535
- """Get exe->egg path translations for a given .exe file"""
1536
-
1537
- prefixes = [
1538
- ('PURELIB/', ''),
1539
- ('PLATLIB/pywin32_system32', ''),
1540
- ('PLATLIB/', ''),
1541
- ('SCRIPTS/', 'EGG-INFO/scripts/'),
1542
- ('DATA/lib/site-packages', ''),
1543
- ]
1544
- z = zipfile.ZipFile(exe_filename)
1545
- try:
1546
- for info in z.infolist():
1547
- name = info.filename
1548
- parts = name.split('/')
1549
- if len(parts) == 3 and parts[2] == 'PKG-INFO':
1550
- if parts[1].endswith('.egg-info'):
1551
- prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
1552
- break
1553
- if len(parts) != 2 or not name.endswith('.pth'):
1554
- continue
1555
- if name.endswith('-nspkg.pth'):
1556
- continue
1557
- if parts[0].upper() in ('PURELIB', 'PLATLIB'):
1558
- contents = z.read(name).decode()
1559
- for pth in yield_lines(contents):
1560
- pth = pth.strip().replace('\\', '/')
1561
- if not pth.startswith('import'):
1562
- prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
1563
- finally:
1564
- z.close()
1565
- prefixes = [(x.lower(), y) for x, y in prefixes]
1566
- prefixes.sort()
1567
- prefixes.reverse()
1568
- return prefixes
1569
-
1570
-
1571
- class PthDistributions(Environment):
1572
- """A .pth file with Distribution paths in it"""
1573
-
1574
- dirty = False
1575
-
1576
- def __init__(self, filename, sitedirs=()):
1577
- self.filename = filename
1578
- self.sitedirs = list(map(normalize_path, sitedirs))
1579
- self.basedir = normalize_path(os.path.dirname(self.filename))
1580
- self._load()
1581
- super().__init__([], None, None)
1582
- for path in yield_lines(self.paths):
1583
- list(map(self.add, find_distributions(path, True)))
1584
-
1585
- def _load(self):
1586
- self.paths = []
1587
- saw_import = False
1588
- seen = dict.fromkeys(self.sitedirs)
1589
- if os.path.isfile(self.filename):
1590
- f = open(self.filename, 'rt')
1591
- for line in f:
1592
- if line.startswith('import'):
1593
- saw_import = True
1594
- continue
1595
- path = line.rstrip()
1596
- self.paths.append(path)
1597
- if not path.strip() or path.strip().startswith('#'):
1598
- continue
1599
- # skip non-existent paths, in case somebody deleted a package
1600
- # manually, and duplicate paths as well
1601
- path = self.paths[-1] = normalize_path(
1602
- os.path.join(self.basedir, path)
1603
- )
1604
- if not os.path.exists(path) or path in seen:
1605
- self.paths.pop() # skip it
1606
- self.dirty = True # we cleaned up, so we're dirty now :)
1607
- continue
1608
- seen[path] = 1
1609
- f.close()
1610
-
1611
- if self.paths and not saw_import:
1612
- self.dirty = True # ensure anything we touch has import wrappers
1613
- while self.paths and not self.paths[-1].strip():
1614
- self.paths.pop()
1615
-
1616
- def save(self):
1617
- """Write changed .pth file back to disk"""
1618
- if not self.dirty:
1619
- return
1620
-
1621
- rel_paths = list(map(self.make_relative, self.paths))
1622
- if rel_paths:
1623
- log.debug("Saving %s", self.filename)
1624
- lines = self._wrap_lines(rel_paths)
1625
- data = '\n'.join(lines) + '\n'
1626
-
1627
- if os.path.islink(self.filename):
1628
- os.unlink(self.filename)
1629
- with open(self.filename, 'wt') as f:
1630
- f.write(data)
1631
-
1632
- elif os.path.exists(self.filename):
1633
- log.debug("Deleting empty %s", self.filename)
1634
- os.unlink(self.filename)
1635
-
1636
- self.dirty = False
1637
-
1638
- @staticmethod
1639
- def _wrap_lines(lines):
1640
- return lines
1641
-
1642
- def add(self, dist):
1643
- """Add `dist` to the distribution map"""
1644
- new_path = (
1645
- dist.location not in self.paths and (
1646
- dist.location not in self.sitedirs or
1647
- # account for '.' being in PYTHONPATH
1648
- dist.location == os.getcwd()
1649
- )
1650
- )
1651
- if new_path:
1652
- self.paths.append(dist.location)
1653
- self.dirty = True
1654
- super().add(dist)
1655
-
1656
- def remove(self, dist):
1657
- """Remove `dist` from the distribution map"""
1658
- while dist.location in self.paths:
1659
- self.paths.remove(dist.location)
1660
- self.dirty = True
1661
- super().remove(dist)
1662
-
1663
- def make_relative(self, path):
1664
- npath, last = os.path.split(normalize_path(path))
1665
- baselen = len(self.basedir)
1666
- parts = [last]
1667
- sep = os.altsep == '/' and '/' or os.sep
1668
- while len(npath) >= baselen:
1669
- if npath == self.basedir:
1670
- parts.append(os.curdir)
1671
- parts.reverse()
1672
- return sep.join(parts)
1673
- npath, last = os.path.split(npath)
1674
- parts.append(last)
1675
- else:
1676
- return path
1677
-
1678
-
1679
- class RewritePthDistributions(PthDistributions):
1680
- @classmethod
1681
- def _wrap_lines(cls, lines):
1682
- yield cls.prelude
1683
- for line in lines:
1684
- yield line
1685
- yield cls.postlude
1686
-
1687
- prelude = _one_liner("""
1688
- import sys
1689
- sys.__plen = len(sys.path)
1690
- """)
1691
- postlude = _one_liner("""
1692
- import sys
1693
- new = sys.path[sys.__plen:]
1694
- del sys.path[sys.__plen:]
1695
- p = getattr(sys, '__egginsert', 0)
1696
- sys.path[p:p] = new
1697
- sys.__egginsert = p + len(new)
1698
- """)
1699
-
1700
-
1701
- if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
1702
- PthDistributions = RewritePthDistributions
1703
-
1704
-
1705
- def _first_line_re():
1706
- """
1707
- Return a regular expression based on first_line_re suitable for matching
1708
- strings.
1709
- """
1710
- if isinstance(first_line_re.pattern, str):
1711
- return first_line_re
1712
-
1713
- # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
1714
- return re.compile(first_line_re.pattern.decode())
1715
-
1716
-
1717
- def auto_chmod(func, arg, exc):
1718
- if func in [os.unlink, os.remove] and os.name == 'nt':
1719
- chmod(arg, stat.S_IWRITE)
1720
- return func(arg)
1721
- et, ev, _ = sys.exc_info()
1722
- # TODO: This code doesn't make sense. What is it trying to do?
1723
- raise (ev[0], ev[1] + (" %s %s" % (func, arg)))
1724
-
1725
-
1726
- def update_dist_caches(dist_path, fix_zipimporter_caches):
1727
- """
1728
- Fix any globally cached `dist_path` related data
1729
-
1730
- `dist_path` should be a path of a newly installed egg distribution (zipped
1731
- or unzipped).
1732
-
1733
- sys.path_importer_cache contains finder objects that have been cached when
1734
- importing data from the original distribution. Any such finders need to be
1735
- cleared since the replacement distribution might be packaged differently,
1736
- e.g. a zipped egg distribution might get replaced with an unzipped egg
1737
- folder or vice versa. Having the old finders cached may then cause Python
1738
- to attempt loading modules from the replacement distribution using an
1739
- incorrect loader.
1740
-
1741
- zipimport.zipimporter objects are Python loaders charged with importing
1742
- data packaged inside zip archives. If stale loaders referencing the
1743
- original distribution, are left behind, they can fail to load modules from
1744
- the replacement distribution. E.g. if an old zipimport.zipimporter instance
1745
- is used to load data from a new zipped egg archive, it may cause the
1746
- operation to attempt to locate the requested data in the wrong location -
1747
- one indicated by the original distribution's zip archive directory
1748
- information. Such an operation may then fail outright, e.g. report having
1749
- read a 'bad local file header', or even worse, it may fail silently &
1750
- return invalid data.
1751
-
1752
- zipimport._zip_directory_cache contains cached zip archive directory
1753
- information for all existing zipimport.zipimporter instances and all such
1754
- instances connected to the same archive share the same cached directory
1755
- information.
1756
-
1757
- If asked, and the underlying Python implementation allows it, we can fix
1758
- all existing zipimport.zipimporter instances instead of having to track
1759
- them down and remove them one by one, by updating their shared cached zip
1760
- archive directory information. This, of course, assumes that the
1761
- replacement distribution is packaged as a zipped egg.
1762
-
1763
- If not asked to fix existing zipimport.zipimporter instances, we still do
1764
- our best to clear any remaining zipimport.zipimporter related cached data
1765
- that might somehow later get used when attempting to load data from the new
1766
- distribution and thus cause such load operations to fail. Note that when
1767
- tracking down such remaining stale data, we can not catch every conceivable
1768
- usage from here, and we clear only those that we know of and have found to
1769
- cause problems if left alive. Any remaining caches should be updated by
1770
- whomever is in charge of maintaining them, i.e. they should be ready to
1771
- handle us replacing their zip archives with new distributions at runtime.
1772
-
1773
- """
1774
- # There are several other known sources of stale zipimport.zipimporter
1775
- # instances that we do not clear here, but might if ever given a reason to
1776
- # do so:
1777
- # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
1778
- # set') may contain distributions which may in turn contain their
1779
- # zipimport.zipimporter loaders.
1780
- # * Several zipimport.zipimporter loaders held by local variables further
1781
- # up the function call stack when running the setuptools installation.
1782
- # * Already loaded modules may have their __loader__ attribute set to the
1783
- # exact loader instance used when importing them. Python 3.4 docs state
1784
- # that this information is intended mostly for introspection and so is
1785
- # not expected to cause us problems.
1786
- normalized_path = normalize_path(dist_path)
1787
- _uncache(normalized_path, sys.path_importer_cache)
1788
- if fix_zipimporter_caches:
1789
- _replace_zip_directory_cache_data(normalized_path)
1790
- else:
1791
- # Here, even though we do not want to fix existing and now stale
1792
- # zipimporter cache information, we still want to remove it. Related to
1793
- # Python's zip archive directory information cache, we clear each of
1794
- # its stale entries in two phases:
1795
- # 1. Clear the entry so attempting to access zip archive information
1796
- # via any existing stale zipimport.zipimporter instances fails.
1797
- # 2. Remove the entry from the cache so any newly constructed
1798
- # zipimport.zipimporter instances do not end up using old stale
1799
- # zip archive directory information.
1800
- # This whole stale data removal step does not seem strictly necessary,
1801
- # but has been left in because it was done before we started replacing
1802
- # the zip archive directory information cache content if possible, and
1803
- # there are no relevant unit tests that we can depend on to tell us if
1804
- # this is really needed.
1805
- _remove_and_clear_zip_directory_cache_data(normalized_path)
1806
-
1807
-
1808
- def _collect_zipimporter_cache_entries(normalized_path, cache):
1809
- """
1810
- Return zipimporter cache entry keys related to a given normalized path.
1811
-
1812
- Alternative path spellings (e.g. those using different character case or
1813
- those using alternative path separators) related to the same path are
1814
- included. Any sub-path entries are included as well, i.e. those
1815
- corresponding to zip archives embedded in other zip archives.
1816
-
1817
- """
1818
- result = []
1819
- prefix_len = len(normalized_path)
1820
- for p in cache:
1821
- np = normalize_path(p)
1822
- if (np.startswith(normalized_path) and
1823
- np[prefix_len:prefix_len + 1] in (os.sep, '')):
1824
- result.append(p)
1825
- return result
1826
-
1827
-
1828
- def _update_zipimporter_cache(normalized_path, cache, updater=None):
1829
- """
1830
- Update zipimporter cache data for a given normalized path.
1831
-
1832
- Any sub-path entries are processed as well, i.e. those corresponding to zip
1833
- archives embedded in other zip archives.
1834
-
1835
- Given updater is a callable taking a cache entry key and the original entry
1836
- (after already removing the entry from the cache), and expected to update
1837
- the entry and possibly return a new one to be inserted in its place.
1838
- Returning None indicates that the entry should not be replaced with a new
1839
- one. If no updater is given, the cache entries are simply removed without
1840
- any additional processing, the same as if the updater simply returned None.
1841
-
1842
- """
1843
- for p in _collect_zipimporter_cache_entries(normalized_path, cache):
1844
- # N.B. pypy's custom zipimport._zip_directory_cache implementation does
1845
- # not support the complete dict interface:
1846
- # * Does not support item assignment, thus not allowing this function
1847
- # to be used only for removing existing cache entries.
1848
- # * Does not support the dict.pop() method, forcing us to use the
1849
- # get/del patterns instead. For more detailed information see the
1850
- # following links:
1851
- # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
1852
- # http://bit.ly/2h9itJX
1853
- old_entry = cache[p]
1854
- del cache[p]
1855
- new_entry = updater and updater(p, old_entry)
1856
- if new_entry is not None:
1857
- cache[p] = new_entry
1858
-
1859
-
1860
- def _uncache(normalized_path, cache):
1861
- _update_zipimporter_cache(normalized_path, cache)
1862
-
1863
-
1864
- def _remove_and_clear_zip_directory_cache_data(normalized_path):
1865
- def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
1866
- old_entry.clear()
1867
-
1868
- _update_zipimporter_cache(
1869
- normalized_path, zipimport._zip_directory_cache,
1870
- updater=clear_and_remove_cached_zip_archive_directory_data)
1871
-
1872
-
1873
- # PyPy Python implementation does not allow directly writing to the
1874
- # zipimport._zip_directory_cache and so prevents us from attempting to correct
1875
- # its content. The best we can do there is clear the problematic cache content
1876
- # and have PyPy repopulate it as needed. The downside is that if there are any
1877
- # stale zipimport.zipimporter instances laying around, attempting to use them
1878
- # will fail due to not having its zip archive directory information available
1879
- # instead of being automatically corrected to use the new correct zip archive
1880
- # directory information.
1881
- if '__pypy__' in sys.builtin_module_names:
1882
- _replace_zip_directory_cache_data = \
1883
- _remove_and_clear_zip_directory_cache_data
1884
- else:
1885
-
1886
- def _replace_zip_directory_cache_data(normalized_path):
1887
- def replace_cached_zip_archive_directory_data(path, old_entry):
1888
- # N.B. In theory, we could load the zip directory information just
1889
- # once for all updated path spellings, and then copy it locally and
1890
- # update its contained path strings to contain the correct
1891
- # spelling, but that seems like a way too invasive move (this cache
1892
- # structure is not officially documented anywhere and could in
1893
- # theory change with new Python releases) for no significant
1894
- # benefit.
1895
- old_entry.clear()
1896
- zipimport.zipimporter(path)
1897
- old_entry.update(zipimport._zip_directory_cache[path])
1898
- return old_entry
1899
-
1900
- _update_zipimporter_cache(
1901
- normalized_path, zipimport._zip_directory_cache,
1902
- updater=replace_cached_zip_archive_directory_data)
1903
-
1904
-
1905
-def is_python(text, filename='<string>'):
-    "Is this string a valid Python script?"
-    try:
-        compile(text, filename, 'exec')
-    except (SyntaxError, TypeError):
-        return False
-    else:
-        return True
-
-
-def is_sh(executable):
-    """Determine if the specified executable is a .sh (contains a #! line)"""
-    try:
-        with io.open(executable, encoding='latin-1') as fp:
-            magic = fp.read(2)
-    except (OSError, IOError):
-        return executable
-    return magic == '#!'
-
-
-def nt_quote_arg(arg):
-    """Quote a command line argument according to Windows parsing rules"""
-    return subprocess.list2cmdline([arg])
-
-
-def is_python_script(script_text, filename):
-    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
-    """
-    if filename.endswith('.py') or filename.endswith('.pyw'):
-        return True  # extension says it's Python
-    if is_python(script_text, filename):
-        return True  # it's syntactically valid Python
-    if script_text.startswith('#!'):
-        # It begins with a '#!' line, so check if 'python' is in it somewhere
-        return 'python' in script_text.splitlines()[0].lower()
-
-    return False  # Not any Python I can recognize
-
-
-try:
-    from os import chmod as _chmod
-except ImportError:
-    # Jython compatibility
-    def _chmod(*args):
-        pass
-
-
-def chmod(path, mode):
-    log.debug("changing mode of %s to %o", path, mode)
-    try:
-        _chmod(path, mode)
-    except os.error as e:
-        log.debug("chmod failed: %s", e)
-
-
1960
- class CommandSpec(list):
1961
- """
1962
- A command spec for a #! header, specified as a list of arguments akin to
1963
- those passed to Popen.
1964
- """
1965
-
1966
- options = []
1967
- split_args = dict()
1968
-
1969
- @classmethod
1970
- def best(cls):
1971
- """
1972
- Choose the best CommandSpec class based on environmental conditions.
1973
- """
1974
- return cls
1975
-
1976
- @classmethod
1977
- def _sys_executable(cls):
1978
- _default = os.path.normpath(sys.executable)
1979
- return os.environ.get('__PYVENV_LAUNCHER__', _default)
1980
-
1981
- @classmethod
1982
- def from_param(cls, param):
1983
- """
1984
- Construct a CommandSpec from a parameter to build_scripts, which may
1985
- be None.
1986
- """
1987
- if isinstance(param, cls):
1988
- return param
1989
- if isinstance(param, list):
1990
- return cls(param)
1991
- if param is None:
1992
- return cls.from_environment()
1993
- # otherwise, assume it's a string.
1994
- return cls.from_string(param)
1995
-
1996
- @classmethod
1997
- def from_environment(cls):
1998
- return cls([cls._sys_executable()])
1999
-
2000
- @classmethod
2001
- def from_string(cls, string):
2002
- """
2003
- Construct a command spec from a simple string representing a command
2004
- line parseable by shlex.split.
2005
- """
2006
- items = shlex.split(string, **cls.split_args)
2007
- return cls(items)
2008
-
2009
- def install_options(self, script_text):
2010
- self.options = shlex.split(self._extract_options(script_text))
2011
- cmdline = subprocess.list2cmdline(self)
2012
- if not isascii(cmdline):
2013
- self.options[:0] = ['-x']
2014
-
2015
- @staticmethod
2016
- def _extract_options(orig_script):
2017
- """
2018
- Extract any options from the first line of the script.
2019
- """
2020
- first = (orig_script + '\n').splitlines()[0]
2021
- match = _first_line_re().match(first)
2022
- options = match.group(1) or '' if match else ''
2023
- return options.strip()
2024
-
2025
- def as_header(self):
2026
- return self._render(self + list(self.options))
2027
-
2028
- @staticmethod
2029
- def _strip_quotes(item):
2030
- _QUOTES = '"\''
2031
- for q in _QUOTES:
2032
- if item.startswith(q) and item.endswith(q):
2033
- return item[1:-1]
2034
- return item
2035
-
2036
- @staticmethod
2037
- def _render(items):
2038
- cmdline = subprocess.list2cmdline(
2039
- CommandSpec._strip_quotes(item.strip()) for item in items)
2040
- return '#!' + cmdline + '\n'
-
-
- # For pbr compat; will be removed in a future version.
- sys_executable = CommandSpec._sys_executable()
-
-
- class WindowsCommandSpec(CommandSpec):
-     split_args = dict(posix=False)
-
-
- class ScriptWriter:
-     """
-     Encapsulates behavior around writing entry point scripts for console and
-     gui apps.
-     """
-
-     template = textwrap.dedent(r"""
-         # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
-         import re
-         import sys
-
-         # for compatibility with easy_install; see #2198
-         __requires__ = %(spec)r
-
-         try:
-             from importlib.metadata import distribution
-         except ImportError:
-             try:
-                 from importlib_metadata import distribution
-             except ImportError:
-                 from pkg_resources import load_entry_point
-
-
-         def importlib_load_entry_point(spec, group, name):
-             dist_name, _, _ = spec.partition('==')
-             matches = (
-                 entry_point
-                 for entry_point in distribution(dist_name).entry_points
-                 if entry_point.group == group and entry_point.name == name
-             )
-             return next(matches).load()
-
-
-         globals().setdefault('load_entry_point', importlib_load_entry_point)
-
-
-         if __name__ == '__main__':
-             sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-             sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
-         """).lstrip()
-
-     command_spec_class = CommandSpec
-
-     @classmethod
-     def get_script_args(cls, dist, executable=None, wininst=False):
-         # for backward compatibility
-         warnings.warn("Use get_args", EasyInstallDeprecationWarning)
-         writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
-         header = cls.get_script_header("", executable, wininst)
-         return writer.get_args(dist, header)
-
-     @classmethod
-     def get_script_header(cls, script_text, executable=None, wininst=False):
-         # for backward compatibility
-         warnings.warn(
-             "Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
-         if wininst:
-             executable = "python.exe"
-         return cls.get_header(script_text, executable)
-
-     @classmethod
-     def get_args(cls, dist, header=None):
-         """
-         Yield write_script() argument tuples for a distribution's
-         console_scripts and gui_scripts entry points.
-         """
-         if header is None:
-             header = cls.get_header()
-         spec = str(dist.as_requirement())
-         for type_ in 'console', 'gui':
-             group = type_ + '_scripts'
-             for name, ep in dist.get_entry_map(group).items():
-                 cls._ensure_safe_name(name)
-                 script_text = cls.template % locals()
-                 args = cls._get_script_args(type_, name, header, script_text)
-                 for res in args:
-                     yield res
-
-     @staticmethod
-     def _ensure_safe_name(name):
-         """
-         Prevent paths in *_scripts entry point names.
-         """
-         has_path_sep = re.search(r'[\\/]', name)
-         if has_path_sep:
-             raise ValueError("Path separators not allowed in script names")
-
-     @classmethod
-     def get_writer(cls, force_windows):
-         # for backward compatibility
-         warnings.warn("Use best", EasyInstallDeprecationWarning)
-         return WindowsScriptWriter.best() if force_windows else cls.best()
-
-     @classmethod
-     def best(cls):
-         """
-         Select the best ScriptWriter for this environment.
-         """
-         if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
-             return WindowsScriptWriter.best()
-         else:
-             return cls
-
-     @classmethod
-     def _get_script_args(cls, type_, name, header, script_text):
-         # Simply write the stub with no extension.
-         yield (name, header + script_text)
-
-     @classmethod
-     def get_header(cls, script_text="", executable=None):
-         """Create a #! line, getting options (if any) from script_text"""
-         cmd = cls.command_spec_class.best().from_param(executable)
-         cmd.install_options(script_text)
-         return cmd.as_header()
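
A short sketch of the public entry point, `get_header` (same module assumption as above; the exact header depends on the local interpreter path):

```python
# Sketch: ScriptWriter.best() picks the platform-appropriate writer class, and
# get_header() renders a #! line from sys.executable plus any options extracted
# from the script's first line.
from setuptools.command.easy_install import ScriptWriter

writer = ScriptWriter.best()
print(writer.get_header())  # e.g. '#!/usr/bin/python3\n' on Linux
```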
-
-
- class WindowsScriptWriter(ScriptWriter):
-     command_spec_class = WindowsCommandSpec
-
-     @classmethod
-     def get_writer(cls):
-         # for backward compatibility
-         warnings.warn("Use best", EasyInstallDeprecationWarning)
-         return cls.best()
-
-     @classmethod
-     def best(cls):
-         """
-         Select the best ScriptWriter suitable for Windows
-         """
-         writer_lookup = dict(
-             executable=WindowsExecutableLauncherWriter,
-             natural=cls,
-         )
-         # for compatibility, use the executable launcher by default
-         launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
-         return writer_lookup[launcher]
-
-     @classmethod
-     def _get_script_args(cls, type_, name, header, script_text):
-         "For Windows, add a .py extension"
-         ext = dict(console='.pya', gui='.pyw')[type_]
-         if ext not in os.environ['PATHEXT'].lower().split(';'):
-             msg = (
-                 "{ext} not listed in PATHEXT; scripts will not be "
-                 "recognized as executables."
-             ).format(**locals())
-             warnings.warn(msg, UserWarning)
-         old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
-         old.remove(ext)
-         header = cls._adjust_header(type_, header)
-         blockers = [name + x for x in old]
-         yield name + ext, header + script_text, 't', blockers
-
-     @classmethod
-     def _adjust_header(cls, type_, orig_header):
-         """
-         Make sure 'pythonw' is used for gui and 'python' is used for
-         console (regardless of what sys.executable is).
-         """
-         pattern = 'pythonw.exe'
-         repl = 'python.exe'
-         if type_ == 'gui':
-             pattern, repl = repl, pattern
-         pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
-         new_header = pattern_ob.sub(string=orig_header, repl=repl)
-         return new_header if cls._use_header(new_header) else orig_header
-
-     @staticmethod
-     def _use_header(new_header):
-         """
-         Should _adjust_header use the replaced header?
-
-         On non-windows systems, always use. On
-         Windows systems, only use the replaced header if it resolves
-         to an executable on the system.
-         """
-         clean_header = new_header[2:-1].strip('"')
-         return sys.platform != 'win32' or find_executable(clean_header)
-
-
- class WindowsExecutableLauncherWriter(WindowsScriptWriter):
-     @classmethod
-     def _get_script_args(cls, type_, name, header, script_text):
-         """
-         For Windows, add a .py extension and an .exe launcher
-         """
-         if type_ == 'gui':
-             launcher_type = 'gui'
-             ext = '-script.pyw'
-             old = ['.pyw']
-         else:
-             launcher_type = 'cli'
-             ext = '-script.py'
-             old = ['.py', '.pyc', '.pyo']
-         hdr = cls._adjust_header(type_, header)
-         blockers = [name + x for x in old]
-         yield (name + ext, hdr + script_text, 't', blockers)
-         yield (
-             name + '.exe', get_win_launcher(launcher_type),
-             'b'  # write in binary mode
-         )
-         if not is_64bit():
-             # install a manifest for the launcher to prevent Windows
-             # from detecting it as an installer (which it will for
-             # launchers like easy_install.exe). Consider only
-             # adding a manifest for launchers detected as installers.
-             # See Distribute #143 for details.
-             m_name = name + '.exe.manifest'
-             yield (m_name, load_launcher_manifest(name), 't')
-
-
- # for backward-compatibility
- get_script_args = ScriptWriter.get_script_args
- get_script_header = ScriptWriter.get_script_header
-
-
- def get_win_launcher(type):
-     """
-     Load the Windows launcher (executable) suitable for launching a script.
-
-     `type` should be either 'cli' or 'gui'
-
-     Returns the executable as a byte string.
-     """
-     launcher_fn = '%s.exe' % type
-     if is_64bit():
-         if get_platform() == "win-arm64":
-             launcher_fn = launcher_fn.replace(".", "-arm64.")
-         else:
-             launcher_fn = launcher_fn.replace(".", "-64.")
-     else:
-         launcher_fn = launcher_fn.replace(".", "-32.")
-     return resource_string('setuptools', launcher_fn)
-
-
- def load_launcher_manifest(name):
-     manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
-     return manifest.decode('utf-8') % vars()
-
-
- def rmtree(path, ignore_errors=False, onerror=auto_chmod):
-     return shutil.rmtree(path, ignore_errors, onerror)
-
-
- def current_umask():
-     tmp = os.umask(0o022)
-     os.umask(tmp)
-     return tmp
-
-
- def only_strs(values):
-     """
-     Exclude non-str values. Ref #3063.
-     """
-     return filter(lambda val: isinstance(val, str), values)
-
-
- class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
-     """
-     Warning for EasyInstall deprecations, bypassing suppression.
-     """
spaces/BreadBytes1/PL-Dashboard/app.py DELETED
@@ -1,992 +0,0 @@
- # ---
- # jupyter:
- #   jupytext:
- #     text_representation:
- #       extension: .py
- #       format_name: light
- #       format_version: '1.5'
- #     jupytext_version: 1.14.2
- #   kernelspec:
- #     display_name: Python [conda env:bbytes] *
- #     language: python
- #     name: conda-env-bbytes-py
- # ---
-
- # +
- import csv
- import pandas as pd
- from datetime import datetime, timedelta
- import numpy as np
- import datetime as dt
- import matplotlib.pyplot as plt
- from pathlib import Path
- import time
- import plotly.graph_objects as go
- import plotly.io as pio
- from PIL import Image
-
- import streamlit as st
- import plotly.express as px
- import altair as alt
- import dateutil.parser
- from matplotlib.colors import LinearSegmentedColormap
-
-
- # +
- class color:
-     PURPLE = '\033[95m'
-     CYAN = '\033[96m'
-     DARKCYAN = '\033[36m'
-     BLUE = '\033[94m'
-     GREEN = '\033[92m'
-     YELLOW = '\033[93m'
-     RED = '\033[91m'
-     BOLD = '\033[1m'
-     UNDERLINE = '\033[4m'
-     END = '\033[0m'
-
- @st.experimental_memo
- def print_PL(amnt, thresh, extras=""):
-     if amnt > 0:
-         return color.BOLD + color.GREEN + str(amnt) + extras + color.END
-     elif amnt < 0:
-         return color.BOLD + color.RED + str(amnt) + extras + color.END
-     elif np.isnan(amnt):
-         return str(np.nan)
-     else:
-         return str(amnt) + extras
-
- @st.experimental_memo
- def get_headers(logtype):
-     otimeheader = ""
-     cheader = ""
-     plheader = ""
-     fmat = '%Y-%m-%d %H:%M:%S'
-
-     if logtype == "ByBit":
-         otimeheader = 'Create Time'
-         cheader = 'Contracts'
-         plheader = 'Closed P&L'
-         fmat = '%Y-%m-%d %H:%M:%S'
-
-     if logtype == "BitGet":
-         otimeheader = 'Date'
-         cheader = 'Futures'
-         plheader = 'Realized P/L'
-         fmat = '%Y-%m-%d %H:%M:%S'
-
-     if logtype == "MEXC":
-         otimeheader = 'Trade time'
-         cheader = 'Futures'
-         plheader = 'closing position'
-         fmat = '%Y/%m/%d %H:%M'
-
-     if logtype == "Binance":
-         otimeheader = 'Date'
-         cheader = 'Symbol'
-         plheader = 'Realized Profit'
-         fmat = '%Y-%m-%d %H:%M:%S'
-
-     #if logtype == "Kucoin":
-     #    otimeheader = 'Time'
-     #    cheader = 'Contract'
-     #    plheader = ''
-     #    fmat = '%Y/%m/%d %H:%M:%S'
-
-     if logtype == "Kraken":
-         otimeheader = 'time'
-         cheader = 'asset'
-         plheader = 'amount'
-         fmat = '%Y-%m-%d %H:%M:%S.%f'
-
-     if logtype == "OkX":
-         otimeheader = '\ufeffOrder Time'
-         cheader = '\ufeffInstrument'
-         plheader = '\ufeffPL'
-         fmat = '%Y-%m-%d %H:%M:%S'
-
-     return otimeheader.lower(), cheader.lower(), plheader.lower(), fmat
-
- @st.experimental_memo
- def get_coin_info(df_coin, principal_balance, plheader):
-     numtrades = int(len(df_coin))
-     numwin = int(sum(df_coin[plheader] > 0))
-     numloss = int(sum(df_coin[plheader] < 0))
-     winrate = np.round(100*numwin/numtrades, 2)
-
-     grosswin = sum(df_coin[df_coin[plheader] > 0][plheader])
-     grossloss = sum(df_coin[df_coin[plheader] < 0][plheader])
-     if grossloss != 0:
-         pfactor = -1*np.round(grosswin/grossloss, 2)
-     else:
-         pfactor = np.nan
-
-     cum_PL = np.round(sum(df_coin[plheader].values), 2)
-     cum_PL_perc = np.round(100*cum_PL/principal_balance, 2)
-     mean_PL = np.round(sum(df_coin[plheader].values/len(df_coin)), 2)
-     mean_PL_perc = np.round(100*mean_PL/principal_balance, 2)
-
-     return numtrades, numwin, numloss, winrate, pfactor, cum_PL, cum_PL_perc, mean_PL, mean_PL_perc
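
A quick hypothetical check of the statistics above: with four closed trades of +50, -20, +30 and -10, the win rate is 50% and the profit factor is gross wins over gross losses, 80/30 ≈ 2.67:

```python
# Toy verification of the win-rate / profit-factor arithmetic (hypothetical data).
import numpy as np
import pandas as pd

toy = pd.DataFrame({'closed p&l': [50.0, -20.0, 30.0, -10.0]})
pl = toy['closed p&l']
winrate = np.round(100 * (pl > 0).sum() / len(pl), 2)            # 50.0
pfactor = -1 * np.round(pl[pl > 0].sum() / pl[pl < 0].sum(), 2)  # 2.67
```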
-
- @st.experimental_memo
- def get_hist_info(df_coin, principal_balance, plheader):
-     numtrades = int(len(df_coin))
-     numwin = int(sum(df_coin[plheader] > 0))
-     numloss = int(sum(df_coin[plheader] < 0))
-     if numtrades != 0:
-         winrate = int(np.round(100*numwin/numtrades, 2))
-     else:
-         winrate = np.nan
-
-     grosswin = sum(df_coin[df_coin[plheader] > 0][plheader])
-     grossloss = sum(df_coin[df_coin[plheader] < 0][plheader])
-     if grossloss != 0:
-         pfactor = -1*np.round(grosswin/grossloss, 2)
-     else:
-         pfactor = np.nan
-     return numtrades, numwin, numloss, winrate, pfactor
-
- @st.experimental_memo
- def get_rolling_stats(df, lev, otimeheader, days):
-     max_roll = (df[otimeheader].max() - df[otimeheader].min()).days
-
-     if max_roll >= days:
-         rollend = df[otimeheader].max() - timedelta(days=days)
-         rolling_df = df[df[otimeheader] >= rollend]
-
-         if len(rolling_df) > 0:
-             rolling_perc = rolling_df['Return Per Trade'].dropna().cumprod().values[-1] - 1
-         else:
-             rolling_perc = np.nan
-     else:
-         rolling_perc = np.nan
-     return 100*rolling_perc
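
The rolling figure is the compounded product of the per-trade returns whose exit falls inside the trailing window, minus one. A toy run with hypothetical numbers:

```python
# Toy illustration of the trailing-window compounding used above.
import pandas as pd
from datetime import timedelta

toy = pd.DataFrame({
    'Exit Date': pd.to_datetime(['2023-01-01', '2023-01-05', '2023-01-09']),
    'Return Per Trade': [1.02, 0.99, 1.03],  # multiplicative per-trade returns
})
rollend = toy['Exit Date'].max() - timedelta(days=7)
window = toy[toy['Exit Date'] >= rollend]    # keeps the last two trades
rolling_perc = window['Return Per Trade'].cumprod().values[-1] - 1
print(100 * rolling_perc)                    # 0.99 * 1.03 - 1 -> ~1.97%
```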
- @st.experimental_memo
- def cc_coding(row):
-     return ['background-color: lightgrey'] * len(row) if row['Exit Date'] <= datetime.strptime('2022-12-16 00:00:00', '%Y-%m-%d %H:%M:%S').date() else [''] * len(row)
-
- def ctt_coding(row):
-     return ['background-color: lightgrey'] * len(row) if row['Exit Date'] <= datetime.strptime('2023-01-02 00:00:00', '%Y-%m-%d %H:%M:%S').date() else [''] * len(row)
-
- def conditional_formatter(value):
-     return "${:.2f}".format(value) if not (abs(value) < 1.00) else "${:.4f}".format(value)
-
- @st.experimental_memo
- def my_style(v, props=''):
-     props = 'color:red' if v < 0 else 'color:green'
-     return props
-
- def filt_df(df, cheader, symbol_selections):
-
-     df = df.copy()
-     df = df[df[cheader].isin(symbol_selections)]
-
-     return df
-
- def tv_reformat(close50filename):
-     try:
-         data = pd.read_csv(open(close50filename, 'r'), sep='[,|\t]', engine='python')
-     except Exception:
-         data = pd.DataFrame([])
-
-     if data.empty:
-         return data
-     else:
-         entry_df = data[data['Type'].str.contains("Entry")]
-         exit_df = data[data['Type'].str.contains("Exit")]
-
-         entry_df.index = range(len(entry_df))
-         exit_df.index = range(len(exit_df))
-
-         df = pd.DataFrame([], columns=['Trade', 'Entry Date', 'Buy Price', 'Sell Price', 'Exit Date', 'P/L per token', 'P/L %', 'Drawdown %'])
-
-         df['Signal'] = [string.split(' ')[1] for string in entry_df['Type']]
-         df['Trade'] = entry_df.index
-         df['Entry Date'] = entry_df['Date/Time']
-         df['Buy Price'] = entry_df['Price USDT']
-
-         df['Sell Price'] = exit_df['Price USDT']
-         df['Exit Date'] = exit_df['Date/Time']
-         df['P/L per token'] = df['Sell Price'] - df['Buy Price']
-         df['P/L %'] = exit_df['Profit %']
-         df['Drawdown %'] = exit_df['Drawdown %']
-         df['Close 50'] = [int(i == "Close 50% of Position") for i in exit_df['Signal']]
-         df = df.sort_values(['Entry Date', 'Close 50'], ascending=[False, True])
-         df.index = range(len(df))
-
-         df.loc[df['Close 50'] == 1, 'Exit Date'] = np.copy(df.loc[df[df['Close 50'] == 1].index.values - 1]['Exit Date'])
-
-         grouped_df = df.groupby('Entry Date').agg({'Signal': 'first', 'Entry Date': 'min', 'Buy Price': 'mean',
-                                                    'Sell Price': 'mean',
-                                                    'Exit Date': 'max',
-                                                    'P/L per token': 'mean',
-                                                    'P/L %': 'mean'})
-
-         grouped_df.insert(0, 'Trade', range(len(grouped_df)))
-         grouped_df.index = range(len(grouped_df))
-         return grouped_df
-
- def load_data(filename, otimeheader, fmat):
-     df = pd.read_csv(open(filename, 'r'), sep='\t')  # so as not to mutate cached value
-     close50filename = filename.split('.')[0] + '-50.' + filename.split('.')[1]
-     df2 = tv_reformat(close50filename)
-
-     if filename == "CT-Trade-Log.csv":
-         df.columns = ['Trade', 'Entry Date', 'Buy Price', 'Sell Price', 'Exit Date', 'P/L per token', 'P/L %', 'Drawdown %']
-         df.insert(1, 'Signal', ['Long']*len(df))
-     elif filename == "CC-Trade-Log.csv" or filename == "PB-Trade-Log.csv":
-         df.columns = ['Trade', 'Signal', 'Entry Date', 'Buy Price', 'Sell Price', 'Exit Date', 'P/L per token', 'P/L %', 'Drawdown %']
-     else:
-         df.columns = ['Trade', 'Signal', 'Entry Date', 'Buy Price', 'Sell Price', 'Exit Date', 'P/L per token', 'P/L %']
-
-     if filename != "CT-Toasted-Trade-Log.csv":
-         df['Signal'] = df['Signal'].str.replace(' ', '', regex=True)
-         df['Buy Price'] = df['Buy Price'].str.replace('$', '', regex=True)
-         df['Sell Price'] = df['Sell Price'].str.replace('$', '', regex=True)
-         df['Buy Price'] = df['Buy Price'].str.replace(',', '', regex=True)
-         df['Sell Price'] = df['Sell Price'].str.replace(',', '', regex=True)
-         df['P/L per token'] = df['P/L per token'].str.replace('$', '', regex=True)
-         df['P/L per token'] = df['P/L per token'].str.replace(',', '', regex=True)
-         df['P/L %'] = df['P/L %'].str.replace('%', '', regex=True)
-
-     df['Buy Price'] = pd.to_numeric(df['Buy Price'])
-     df['Sell Price'] = pd.to_numeric(df['Sell Price'])
-     df['P/L per token'] = pd.to_numeric(df['P/L per token'])
-     df['P/L %'] = pd.to_numeric(df['P/L %'])
-
-     if not df2.empty:
-         df = pd.concat([df, df2], axis=0, ignore_index=True)
-
-     if filename == "CT-Trade-Log.csv":
-         df['Signal'] = ['Long']*len(df)
-
-     dateheader = 'Date'
-     theader = 'Time'
-
-     df[dateheader] = [tradetimes.split(" ")[0] for tradetimes in df[otimeheader].values]
-     df[theader] = [tradetimes.split(" ")[1] for tradetimes in df[otimeheader].values]
-
-     df[otimeheader] = [dateutil.parser.parse(date + ' ' + time)
-                        for date, time in zip(df[dateheader], df[theader])]
-     df[otimeheader] = pd.to_datetime(df[otimeheader])
-     df['Exit Date'] = pd.to_datetime(df['Exit Date'])
-     df.sort_values(by=otimeheader, inplace=True)
-
-     df[dateheader] = [dateutil.parser.parse(date).date() for date in df[dateheader]]
-     df[theader] = [dateutil.parser.parse(time).time() for time in df[theader]]
-     df['Trade'] = df.index + 1  # reindex
-
-     if filename == "CT-Trade-Log.csv":
-         df['DCA'] = np.nan
-
-         for exit in pd.unique(df['Exit Date']):
-             df_exit = df[df['Exit Date'] == exit]
-             if dateutil.parser.parse(str(exit)) < dateutil.parser.parse('2023-02-07 13:00:00'):
-                 for i in range(len(df_exit)):
-                     ind = df_exit.index[i]
-                     df.loc[ind, 'DCA'] = i + 1
-             else:
-                 for i in range(len(df_exit)):
-                     ind = df_exit.index[i]
-                     df.loc[ind, 'DCA'] = i + 1.1
-     return df
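
Note the fractional labels: fills that exit before 2023-02-07 13:00 get DCA slots 1-4, while later ones get 1.1, 2.1, ..., so the allocation maps built elsewhere in this file can distinguish the two regimes. In miniature:

```python
# The fractional DCA labels select the post-02/07/2023 allocations; the values
# here mirror the dashboard form's defaults (25%/25%/25%/25% vs. 50%/50%).
dca_map = {1: 0.25, 2: 0.25, 3: 0.25, 4: 0.25, 1.1: 0.50, 2.1: 0.50}
assert dca_map[2.1] == 0.50  # second fill of a post-cutoff trade
```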
-
-
- def get_sd_df(sd_df, sd, bot_selections, dca1, dca2, dca3, dca4, dca5, dca6, fees, lev, dollar_cap, principal_balance):
-     sd = 2*.00026
-     signal_map = {'Long': 1, 'Short': -1}  # signal direction: long = +1, short = -1
-     # ------ Standard Dev. Calculations.
-     if bot_selections == "Cinnamon Toast":
-         dca_map = {1: dca1/100, 2: dca2/100, 3: dca3/100, 4: dca4/100, 1.1: dca5/100, 2.1: dca6/100}
-         sd_df['DCA %'] = sd_df['DCA'].map(dca_map)
-         sd_df['Calculated Return % (+)'] = sd_df['Signal'].map(signal_map)*(sd_df['DCA %'])*(1-fees)*((sd_df['Sell Price']*(1+sd_df['Signal'].map(signal_map)*sd) - sd_df['Buy Price']*(1-sd_df['Signal'].map(signal_map)*sd))/sd_df['Buy Price']*(1-sd_df['Signal'].map(signal_map)*sd) - fees)  # accounts for fees on open and close of trade
-         sd_df['Calculated Return % (-)'] = sd_df['Signal'].map(signal_map)*(sd_df['DCA %'])*(1-fees)*((sd_df['Sell Price']*(1-sd_df['Signal'].map(signal_map)*sd) - sd_df['Buy Price']*(1+sd_df['Signal'].map(signal_map)*sd))/sd_df['Buy Price']*(1+sd_df['Signal'].map(signal_map)*sd) - fees)  # accounts for fees on open and close of trade
-         sd_df['DCA'] = np.floor(sd_df['DCA'].values)
-
-         sd_df['Return Per Trade (+)'] = np.nan
-         sd_df['Return Per Trade (-)'] = np.nan
-         sd_df['Balance used in Trade (+)'] = np.nan
-         sd_df['Balance used in Trade (-)'] = np.nan
-         sd_df['New Balance (+)'] = np.nan
-         sd_df['New Balance (-)'] = np.nan
-
-         g1 = sd_df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return % (+)'].reset_index(name='Return Per Trade (+)')
-         g2 = sd_df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return % (-)'].reset_index(name='Return Per Trade (-)')
-         sd_df.loc[sd_df['DCA']==1.0, 'Return Per Trade (+)'] = 1+lev*g1['Return Per Trade (+)'].values
-         sd_df.loc[sd_df['DCA']==1.0, 'Return Per Trade (-)'] = 1+lev*g2['Return Per Trade (-)'].values
-
-         sd_df['Compounded Return (+)'] = sd_df['Return Per Trade (+)'].cumprod()
-         sd_df['Compounded Return (-)'] = sd_df['Return Per Trade (-)'].cumprod()
-         sd_df.loc[sd_df['DCA']==1.0, 'New Balance (+)'] = [min(dollar_cap/lev, bal*principal_balance) for bal in sd_df.loc[sd_df['DCA']==1.0, 'Compounded Return (+)']]
-         sd_df.loc[sd_df['DCA']==1.0, 'Balance used in Trade (+)'] = np.concatenate([[principal_balance], sd_df.loc[sd_df['DCA']==1.0, 'New Balance (+)'].values[:-1]])
-
-         sd_df.loc[sd_df['DCA']==1.0, 'New Balance (-)'] = [min(dollar_cap/lev, bal*principal_balance) for bal in sd_df.loc[sd_df['DCA']==1.0, 'Compounded Return (-)']]
-         sd_df.loc[sd_df['DCA']==1.0, 'Balance used in Trade (-)'] = np.concatenate([[principal_balance], sd_df.loc[sd_df['DCA']==1.0, 'New Balance (-)'].values[:-1]])
-     else:
-         sd_df['Calculated Return % (+)'] = sd_df['Signal'].map(signal_map)*(1-fees)*((sd_df['Sell Price']*(1+sd_df['Signal'].map(signal_map)*sd) - sd_df['Buy Price']*(1-sd_df['Signal'].map(signal_map)*sd))/sd_df['Buy Price']*(1-sd_df['Signal'].map(signal_map)*sd) - fees)  # accounts for fees on open and close of trade
-         sd_df['Calculated Return % (-)'] = sd_df['Signal'].map(signal_map)*(1-fees)*((sd_df['Sell Price']*(1-sd_df['Signal'].map(signal_map)*sd) - sd_df['Buy Price']*(1+sd_df['Signal'].map(signal_map)*sd))/sd_df['Buy Price']*(1+sd_df['Signal'].map(signal_map)*sd) - fees)  # accounts for fees on open and close of trade
-         sd_df['Return Per Trade (+)'] = np.nan
-         sd_df['Return Per Trade (-)'] = np.nan
-
-         g1 = sd_df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return % (+)'].reset_index(name='Return Per Trade (+)')
-         g2 = sd_df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return % (-)'].reset_index(name='Return Per Trade (-)')
-         sd_df['Return Per Trade (+)'] = 1+lev*g1['Return Per Trade (+)'].values
-         sd_df['Return Per Trade (-)'] = 1+lev*g2['Return Per Trade (-)'].values
-
-         sd_df['Compounded Return (+)'] = sd_df['Return Per Trade (+)'].cumprod()
-         sd_df['Compounded Return (-)'] = sd_df['Return Per Trade (-)'].cumprod()
-         sd_df['New Balance (+)'] = [min(dollar_cap/lev, bal*principal_balance) for bal in sd_df['Compounded Return (+)']]
-         sd_df['Balance used in Trade (+)'] = np.concatenate([[principal_balance], sd_df['New Balance (+)'].values[:-1]])
-
-         sd_df['New Balance (-)'] = [min(dollar_cap/lev, bal*principal_balance) for bal in sd_df['Compounded Return (-)']]
-         sd_df['Balance used in Trade (-)'] = np.concatenate([[principal_balance], sd_df['New Balance (-)'].values[:-1]])
-
-     sd_df['Net P/L Per Trade (+)'] = (sd_df['Return Per Trade (+)']-1)*sd_df['Balance used in Trade (+)']
-     sd_df['Cumulative P/L (+)'] = sd_df['Net P/L Per Trade (+)'].cumsum()
-
-     sd_df['Net P/L Per Trade (-)'] = (sd_df['Return Per Trade (-)']-1)*sd_df['Balance used in Trade (-)']
-     sd_df['Cumulative P/L (-)'] = sd_df['Net P/L Per Trade (-)'].cumsum()
-     return sd_df
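
The core of the compounding above, stripped to a few lines with hypothetical inputs: per-exit percentage returns are scaled by leverage, compounded, and balances are capped at `dollar_cap / lev`:

```python
# Toy walk-through of the leverage/compounding logic (hypothetical numbers).
import numpy as np

lev, principal, dollar_cap = 2, 1000.0, 1e9
per_exit = np.array([0.01, -0.005, 0.02])   # summed 'Calculated Return %' per exit
ret_per_trade = 1 + lev * per_exit          # [1.02, 0.99, 1.04]
new_balance = np.minimum(dollar_cap / lev, np.cumprod(ret_per_trade) * principal)
balance_used = np.concatenate([[principal], new_balance[:-1]])
net_pl = (ret_per_trade - 1) * balance_used
print(net_pl.cumsum()[-1])                  # cumulative P/L over the three exits
```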
-
- def runapp() -> None:
-     #st.header("Trading Bot Dashboard :bread: :moneybag:")
-     #st.write("Welcome to the Trading Bot Dashboard by BreadBytes! You can use this dashboard to track " +
-     #         "the performance of our trading bots, or upload and track your own performance data from a supported exchange.")
-     #if 'auth_user' not in st.session_state:
-     #    with st.form("Login"):
-     #        user = st.text_input("Username")
-     #        secret = st.text_input("Password")
-
-     #        submitted = st.form_submit_button("Submit")
-     #        if submitted:
-     #            if user == st.secrets.get("db_username") and secret == st.secrets.get("db_password"):
-     #                st.success("Success!")
-     #                st.session_state['auth_user'] = True
-     #            else:
-     #                st.success("Incorrect username and/or password. Please try again.")
-     #                st.session_state['auth_user'] = False
-
-     #try:
-     #    st.session_state['auth_user'] == True
-     #except:
-     #    st.error("Please log in.")
-     #    return
-
-     #if st.session_state['auth_user'] == True:
-     if True:
-         st.sidebar.header("FAQ")
-
-         with st.sidebar.subheader("FAQ"):
-             st.markdown(Path("FAQ_README.md").read_text(), unsafe_allow_html=True)
-
-         no_errors = True
-
-         exchanges = ["ByBit", "BitGet", "Binance", "Kraken", "MEXC", "OkX", "BreadBytes Historical Logs"]
-         logtype = st.selectbox("Select your Exchange", options=exchanges)
-
-         if logtype != "BreadBytes Historical Logs":
-             uploaded_data = st.file_uploader(
-                 "Drag and Drop files here or click Browse files.", type=[".csv", ".xlsx"], accept_multiple_files=False
-             )
-             if uploaded_data is None:
-                 st.info("Please upload a file, or select BreadBytes Historical Logs as your exchange.")
-             else:
-                 st.success("Your file was uploaded successfully!")
-
-                 uploadtype = uploaded_data.name.split(".")[-1]
-                 if uploadtype == "csv":
-                     df = pd.read_csv(uploaded_data)
-                 if uploadtype == "xlsx":
-                     df = pd.read_excel(uploaded_data)
-
-                 otimeheader, cheader, plheader, fmat = get_headers(logtype)
-
-                 df.columns = [c.lower() for c in df.columns]
-
-             if uploaded_data is not None:
-                 with st.container():
-                     bot_selections = "Other"
-                     if bot_selections == "Other":
-                         try:
-                             symbols = list(df[cheader].unique())
-                             symbol_selections = st.multiselect(
-                                 "Select/Deselect Asset(s)", options=symbols, default=symbols
-                             )
-                         except Exception:
-                             st.error("Please select your exchange or upload a supported trade log file.")
-                             no_errors = False
-                     if no_errors and symbol_selections is None:
-                         st.error("Please select at least one asset.")
-                         no_errors = False
-
-                 if no_errors:
-                     if logtype == 'Binance':
-                         otimeheader = df.filter(regex=otimeheader).columns.values[0]
-                         fmat = '%Y-%m-%d %H:%M:%S'
-                         df = df[df[plheader] != 0]
-                     #if logtype == "Kucoin":
-                     #    df = df.replace('\r\n', '', regex=True)
-                     with st.container():
-                         col1, col2 = st.columns(2)
-                         with col1:
-                             try:
-                                 startdate = st.date_input("Start Date", value=pd.to_datetime(df[otimeheader]).min())
-                             except Exception:
-                                 st.error("Please select your exchange or upload a supported trade log file.")
-                                 no_errors = False
-                         with col2:
-                             try:
-                                 enddate = st.date_input("End Date", value=pd.to_datetime(df[otimeheader]).max())
-                             except Exception:
-                                 st.error("Please select your exchange or upload a supported trade log file.")
-                                 no_errors = False
-                     #st.sidebar.subheader("Customize your Dashboard")
-
-                     if no_errors and (enddate < startdate):
-                         st.error("End Date must be later than Start date. Please try again.")
-                         no_errors = False
-                     with st.container():
-                         col1, col2 = st.columns(2)
-                         with col1:
-                             principal_balance = st.number_input('Starting Balance', min_value=0.00, value=1000.00, max_value=1000000.00, step=10.00)
-
-                     with st.expander("Raw Trade Log"):
-                         st.write(df)
-
-                 if no_errors:
-                     df = filt_df(df, cheader, symbol_selections)
-
-                     if len(df) == 0:
-                         st.error("There are no available trades matching your selections. Please try again!")
-                         no_errors = False
-
-                 if no_errors:
-                     ## reformatting / necessary calculations
-                     if logtype == 'BitGet':
-                         try:
-                             badcol = df.filter(regex='Unnamed').columns.values[0]
-                         except Exception:
-                             badcol = []
-                         df = df[[col for col in df.columns if col != badcol]]
-                         df = df[df[plheader] != 0]
-                         if uploadtype == "xlsx":
-                             fmat = '%Y-%m-%d %H:%M:%S.%f'
-                     if logtype == 'MEXC':
-                         df = df[df[plheader] != 0]
-                         # collapse on transaction ID then calculate position prices
-                     if logtype == "Kraken":
-                         df = df.replace('\r\n', '', regex=True)
-                         df[otimeheader] = [str(time.split(".")[0]) for time in df[otimeheader].values]
-                         df = df[df['type'] == 'margin']
-                         df[plheader] = df[plheader] - df['fee']
-                         fmat = '%Y-%m-%d %H:%M:%S'
-                         if len(df) == 0:
-                             st.error("File Type Error. Please upload a Ledger history file from Kraken.")
-                             no_errors = False
-
-                 if no_errors:
-                     dateheader = 'Trade Date'
-                     theader = 'Trade Time'
-
-                     if type(df[otimeheader].values[0]) != str:  # clunky fix to catch non-strings since np.datetime64 unstable
-                         df[otimeheader] = [str(date) for date in df[otimeheader]]
-
-                     df[dateheader] = [tradetimes.split(" ")[0] for tradetimes in df[otimeheader].values]
-                     df[theader] = [tradetimes.split(" ")[1] for tradetimes in df[otimeheader].values]
-
-                     dfmat = fmat.split(" ")[0]
-                     tfmat = fmat.split(" ")[1]
-
-                     df[otimeheader] = [datetime.strptime(date + ' ' + time, fmat)
-                                        for date, time in zip(df[dateheader], df[theader])]
-
-                     df[dateheader] = [datetime.strptime(date, dfmat).date() for date in df[dateheader].values]
-                     df[theader] = [datetime.strptime(time, tfmat).time() for time in df[theader].values]
-
-                     df[otimeheader] = pd.to_datetime(df[otimeheader])
-
-                     df.sort_values(by=otimeheader, inplace=True)
-                     df.index = range(0, len(df))
-
-                     start = df.iloc[0][dateheader] if (not startdate) else startdate
-                     stop = df.iloc[len(df)-1][dateheader] if (not enddate) else enddate
-
-                     df = df[(df[dateheader] >= start) & (df[dateheader] <= stop)]
-
-                     results_df = pd.DataFrame([], columns=['Coin', '# of Trades', 'Wins', 'Losses', 'Win Rate',
-                                                            'Profit Factor', 'Cum. P/L', 'Cum. P/L (%)', 'Avg. P/L', 'Avg. P/L (%)'])
-
-                     for currency in pd.unique(df[cheader]):
-                         df_coin = df[(df[cheader] == currency) & (df[dateheader] >= start) & (df[dateheader] <= stop)]
-                         data = get_coin_info(df_coin, principal_balance, plheader)
-                         results_df.loc[len(results_df)] = list([currency]) + list(i for i in data)
-
-                     if bot_selections == "Other" and len(pd.unique(df[cheader])) > 1:
-                         df_dates = df[(df[dateheader] >= start) & (df[dateheader] <= stop)]
-                         data = get_coin_info(df_dates, principal_balance, plheader)
-                         results_df.loc[len(results_df)] = list(['Total']) + list(i for i in data)
-
-                     account_plural = "s" if len(bot_selections) > 1 else ""
-                     st.subheader(f"Results for your Account{account_plural}")
-                     totals = results_df[~(results_df['Coin'] == 'Total')].groupby('Coin', as_index=False).sum()
-                     if len(bot_selections) > 1:
-                         st.metric(
-                             "Gains for All Accounts",
-                             f"${totals['Cum. P/L'].sum():.2f}",
-                             f"{totals['Cum. P/L (%)'].sum():.2f} %",
-                         )
-
-                     max_col = 4
-                     tot_rows = int(np.ceil(len(totals)/max_col))
-
-                     for r in np.arange(0, tot_rows):
-                         #for column, row in zip(st.columns(len(totals)), totals.itertuples()):
-                         for column, row in zip(st.columns(max_col), totals.iloc[r*max_col:(r+1)*max_col].itertuples()):
-                             column.metric(
-                                 row.Coin,
-                                 f"${row._7:.2f}",
-                                 f"{row._8:.2f} %",
-                             )
-                     st.subheader(f"Historical Performance")
-                     cmap = LinearSegmentedColormap.from_list('rg', ["r", "grey", "g"], N=100)
-                     df['Cumulative P/L'] = df[plheader].cumsum()
-                     if logtype == "Binance":  # Binance (utc) doesn't show up in st line charts???
-                         xx = dateheader
-                     else:
-                         xx = otimeheader
-
-                     #st.line_chart(data=df, x=xx, y='Cumulative P/L', use_container_width=True)
-                     # Create figure
-                     fig = go.Figure()
-
-                     pyLogo = Image.open("logo.png")
-
-                     # Add trace
-                     fig.add_trace(
-                         go.Scatter(x=df[xx], y=np.round(df['Cumulative P/L'].values, 2), line_shape='spline', line={'smoothing': .2, 'color': 'rgba(31, 119, 200,.8)'}, name='Cumulative P/L')
-                     )
-
-                     fig.add_layout_image(
-                         dict(
-                             source=pyLogo,
-                             xref="paper",
-                             yref="paper",
-                             x=0.05,  #dfdata['Exit Date'].astype('int64').min() // 10**9,
-                             y=.85,   #dfdata['Cumulative P/L'].max(),
-                             sizex=.9,  #(dfdata['Exit Date'].astype('int64').max() - dfdata['Exit Date'].astype('int64').min()) // 10**9,
-                             sizey=.9,  #(dfdata['Cumulative P/L'].max() - dfdata['Cumulative P/L'].min()),
-                             sizing="contain",
-                             opacity=0.2,
-                             layer="below")
-                     )
-
-                     #style layout
-                     fig.update_layout(
-                         height=600,
-                         xaxis=dict(
-                             title="Exit Date",
-                             tickmode='array',
-                         ),
-                         yaxis=dict(
-                             title="Cumulative P/L"
-                         ))
-
-                     st.plotly_chart(fig, theme=None, use_container_width=True, height=600)
-
-                     st.subheader("Summarized Results")
-                     if df.empty:
-                         st.error("Oops! None of the data provided matches your selection(s). Please try again.")
-                         no_errors = False
-                     else:
-                         st.dataframe(results_df.style.format({'Win Rate': '{:.2f}%', 'Profit Factor': '{:.2f}',
-                                                               'Avg. P/L (%)': '{:.2f}%', 'Cum. P/L (%)': '{:.2f}%',
-                                                               'Cum. P/L': '{:.2f}', 'Avg. P/L': '{:.2f}'})\
-                             .text_gradient(subset=['Win Rate'], cmap=cmap, vmin=0, vmax=100)\
-                             .text_gradient(subset=['Profit Factor'], cmap=cmap, vmin=0, vmax=2), use_container_width=True)
-
-         if logtype == "BreadBytes Historical Logs" and no_errors:
-
-             bots = ["Cinnamon Toast", "Short Bread", "Cosmic Cupcake", "Pure Bread"]
-             bot_selections = st.selectbox("Select your Trading Bot", options=bots)
-             otimeheader = 'Exit Date'
-             fmat = '%Y-%m-%d %H:%M:%S'
-             fees = .075/100
-
-             if bot_selections == "Cinnamon Toast":
-                 lev_cap = 5
-                 dollar_cap = 1000000000.00
-                 data = load_data("CT-Trade-Log.csv", otimeheader, fmat)
-             if bot_selections == "French Toast":
-                 lev_cap = 3
-                 dollar_cap = 10000000000.00
-                 data = load_data("FT-Trade-Log.csv", otimeheader, fmat)
-             if bot_selections == "Short Bread":
-                 lev_cap = 5
-                 dollar_cap = 1000000000.00
-                 data = load_data("SB-Trade-Log.csv", otimeheader, fmat)
-             if bot_selections == "Cosmic Cupcake":
-                 lev_cap = 3
-                 dollar_cap = 1000000000.00
-                 data = load_data("CC-Trade-Log.csv", otimeheader, fmat)
-             if bot_selections == "Pure Bread":
-                 lev_cap = 3
-                 dollar_cap = 1000000000.00
-                 data = load_data("PB-Trade-Log.csv", otimeheader, fmat)
-
-             df = data.copy(deep=True)
-
-             dateheader = 'Date'
-             theader = 'Time'
-
-             st.subheader("Choose your settings:")
-             with st.form("user input"):
-                 if no_errors:
-                     with st.container():
-                         col1, col2 = st.columns(2)
-                         with col1:
-                             try:
-                                 startdate = st.date_input("Start Date", value=pd.to_datetime(df[otimeheader]).min())
-                             except Exception:
-                                 st.error("Please select your exchange or upload a supported trade log file.")
-                                 no_errors = False
-                         with col2:
-                             try:
-                                 enddate = st.date_input("End Date", value=datetime.today())
-                             except Exception:
-                                 st.error("Please select your exchange or upload a supported trade log file.")
-                                 no_errors = False
-                     #st.sidebar.subheader("Customize your Dashboard")
-
-                     if no_errors and (enddate < startdate):
-                         st.error("End Date must be later than Start date. Please try again.")
-                         no_errors = False
-                     with st.container():
-                         col1, col2 = st.columns(2)
-                         with col2:
-                             lev = st.number_input('Leverage', min_value=1, value=1, max_value=lev_cap, step=1)
-                         with col1:
-                             principal_balance = st.number_input('Starting Balance', min_value=0.00, value=1000.00, max_value=dollar_cap, step=.01)
-
-                 if bot_selections == "Cinnamon Toast":
-                     st.write("Choose your DCA setup (for trades before 02/07/2023)")
-                     with st.container():
-                         col1, col2, col3, col4 = st.columns(4)
-                         with col1:
-                             dca1 = st.number_input('DCA 1 Allocation', min_value=0, value=25, max_value=100, step=1)
-                         with col2:
-                             dca2 = st.number_input('DCA 2 Allocation', min_value=0, value=25, max_value=100, step=1)
-                         with col3:
-                             dca3 = st.number_input('DCA 3 Allocation', min_value=0, value=25, max_value=100, step=1)
-                         with col4:
-                             dca4 = st.number_input('DCA 4 Allocation', min_value=0, value=25, max_value=100, step=1)
-                     st.write("Choose your DCA setup (for trades on or after 02/07/2023)")
-                     with st.container():
-                         col1, col2 = st.columns(2)
-                         with col1:
-                             dca5 = st.number_input('DCA 1 Allocation', min_value=0, value=50, max_value=100, step=1)
-                         with col2:
-                             dca6 = st.number_input('DCA 2 Allocation', min_value=0, value=50, max_value=100, step=1)
-
-                 #hack way to get button centered
-                 c = st.columns(9)
-                 with c[4]:
-                     submitted = st.form_submit_button("Get Cookin'!")
-
-             if submitted and principal_balance * lev > dollar_cap:
-                 lev = np.floor(dollar_cap/principal_balance)
-                 st.error(f"WARNING: (Starting Balance)*(Leverage) exceeds the ${dollar_cap} limit. Using maximum available leverage of {lev}")
-
-             if submitted and no_errors:
-                 df = df[(df[dateheader] >= startdate) & (df[dateheader] <= enddate)]
-                 signal_map = {'Long': 1, 'Short': -1}
-
-                 if len(df) == 0:
-                     st.error("There are no available trades matching your selections. Please try again!")
-                     no_errors = False
-
-                 if no_errors:
-                     if bot_selections == "Cinnamon Toast":
-                         dca_map = {1: dca1/100, 2: dca2/100, 3: dca3/100, 4: dca4/100, 1.1: dca5/100, 2.1: dca6/100}
-                         df['DCA %'] = df['DCA'].map(dca_map)
-                         df['Calculated Return %'] = df['Signal'].map(signal_map)*(df['DCA %'])*(1-fees)*((df['Sell Price']-df['Buy Price'])/df['Buy Price'] - fees)  # accounts for fees on open and close of trade
-                         df['DCA'] = np.floor(df['DCA'].values)
-
-                         df['Return Per Trade'] = np.nan
-                         df['Balance used in Trade'] = np.nan
-                         df['New Balance'] = np.nan
-
-                         g = df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return %'].reset_index(name='Return Per Trade')
-                         df.loc[df['DCA']==1.0, 'Return Per Trade'] = 1+lev*g['Return Per Trade'].values
-
-                         df['Compounded Return'] = df['Return Per Trade'].cumprod()
-                         df.loc[df['DCA']==1.0, 'New Balance'] = [min(dollar_cap/lev, bal*principal_balance) for bal in df.loc[df['DCA']==1.0, 'Compounded Return']]
-                         df.loc[df['DCA']==1.0, 'Balance used in Trade'] = np.concatenate([[principal_balance], df.loc[df['DCA']==1.0, 'New Balance'].values[:-1]])
-                     else:
-                         df['Calculated Return %'] = df['Signal'].map(signal_map)*(1-fees)*((df['Sell Price']-df['Buy Price'])/df['Buy Price'] - fees)  # accounts for fees on open and close of trade
-                         df['Return Per Trade'] = np.nan
-                         g = df.groupby('Exit Date').sum(numeric_only=True)['Calculated Return %'].reset_index(name='Return Per Trade')
-                         df['Return Per Trade'] = 1+lev*g['Return Per Trade'].values
-
-                         df['Compounded Return'] = df['Return Per Trade'].cumprod()
-                         df['New Balance'] = [min(dollar_cap/lev, bal*principal_balance) for bal in df['Compounded Return']]
-                         df['Balance used in Trade'] = np.concatenate([[principal_balance], df['New Balance'].values[:-1]])
-                     df['Net P/L Per Trade'] = (df['Return Per Trade']-1)*df['Balance used in Trade']
-                     df['Cumulative P/L'] = df['Net P/L Per Trade'].cumsum()
-
-                     if bot_selections == "Cinnamon Toast" or bot_selections == "Cosmic Cupcake":
-                         cum_pl = df.loc[df.drop('Drawdown %', axis=1).dropna().index[-1], 'Cumulative P/L'] + principal_balance
-                         #cum_sdp = sd_df.loc[sd_df.drop('Drawdown %', axis=1).dropna().index[-1], 'Cumulative P/L (+)'] + principal_balance
-                         #cum_sdm = sd_df.loc[sd_df.drop('Drawdown %', axis=1).dropna().index[-1], 'Cumulative P/L (-)'] + principal_balance
-                     else:
-                         cum_pl = df.loc[df.dropna().index[-1], 'Cumulative P/L'] + principal_balance
-                         #cum_sdp = sd_df.loc[sd_df.dropna().index[-1], 'Cumulative P/L (+)'] + principal_balance
-                         #cum_sdm = sd_df.loc[sd_df.dropna().index[-1], 'Cumulative P/L (-)'] + principal_balance
-                     #sd = 2*.00026
-                     #sd_df = get_sd_df(df.copy(), sd, bot_selections, dca1, dca2, dca3, dca4, dca5, dca6, fees, lev, dollar_cap, principal_balance)
-
-                     effective_return = 100*((cum_pl - principal_balance)/principal_balance)
-
-                     st.header(f"{bot_selections} Results")
-                     with st.container():
-
-                         if len(bot_selections) > 1:
-                             col1, col2 = st.columns(2)
-                             with col1:
-                                 st.metric(
-                                     "Total Account Balance",
-                                     f"${cum_pl:.2f}",
-                                     f"{100*(cum_pl-principal_balance)/(principal_balance):.2f} %",
-                                 )
-
-                             # with col2:
-                             #     st.write("95% of trades should fall within this 2 std. dev. range.")
-                             #     st.metric(
-                             #         "High Range (+ 2 std. dev.)",
-                             #         f"",  #${cum_sdp:.2f}
-                             #         f"{100*(cum_sdp-principal_balance)/(principal_balance):.2f} %",
-                             #     )
-                             #     st.metric(
-                             #         "Low Range (- 2 std. dev.)",
-                             #         f"",  #${cum_sdm:.2f}
-                             #         f"{100*(cum_sdm-principal_balance)/(principal_balance):.2f} %",
-                             #     )
-                     if bot_selections == "Cinnamon Toast" or bot_selections == "Cosmic Cupcake" or bot_selections == "Pure Bread":
-                         #st.line_chart(data=df.drop('Drawdown %', axis=1).dropna(), x='Exit Date', y='Cumulative P/L', use_container_width=True)
-                         dfdata = df.drop('Drawdown %', axis=1).dropna()
-                         #sd_df = sd_df.drop('Drawdown %', axis=1).dropna()
-                     else:
-                         #st.line_chart(data=df.dropna(), x='Exit Date', y='Cumulative P/L', use_container_width=True)
-                         dfdata = df.dropna()
-                         #sd_df = sd_df.dropna()
-
-                     # Create figure
-                     fig = go.Figure()
-
-                     pyLogo = Image.open("logo.png")
-
-                     # fig.add_traces(go.Scatter(x=sd_df['Exit Date'], y=sd_df['Cumulative P/L (+)'], line_shape='spline',
-                     #                           line=dict(smoothing=1.3, color='rgba(31, 119, 200,0)'), showlegend=False)
-                     # )
-
-                     # fig.add_traces(go.Scatter(x=sd_df['Exit Date'], y=sd_df['Cumulative P/L (-)'],
-                     #                           line=dict(smoothing=1.3, color='rgba(31, 119, 200,0)'), line_shape='spline',
-                     #                           fill='tonexty',
-                     #                           fillcolor='rgba(31, 119, 200,.2)', name='+/- Standard Deviation')
-                     # )
-
-                     # Add trace
-                     fig.add_trace(
-                         go.Scatter(x=dfdata['Exit Date'], y=np.round(dfdata['Cumulative P/L'].values, 2), line_shape='spline',
-                                    line={'smoothing': 1.0, 'color': 'rgba(31, 119, 200,.8)'},
-                                    name='Cumulative P/L')
-                     )
-                     buyhold = (principal_balance/dfdata['Buy Price'][dfdata.index[0]])*(dfdata['Buy Price']-dfdata['Buy Price'][dfdata.index[0]])
-                     fig.add_trace(go.Scatter(x=dfdata['Exit Date'], y=np.round(buyhold.values, 2), line_shape='spline',
-                                              line={'smoothing': 1.0, 'color': 'red'}, name='Buy & Hold Return')
-                     )
-
-                     fig.add_layout_image(
-                         dict(
-                             source=pyLogo,
-                             xref="paper",
-                             yref="paper",
-                             x=0.05,  #dfdata['Exit Date'].astype('int64').min() // 10**9,
-                             y=.85,   #dfdata['Cumulative P/L'].max(),
-                             sizex=.9,  #(dfdata['Exit Date'].astype('int64').max() - dfdata['Exit Date'].astype('int64').min()) // 10**9,
-                             sizey=.9,  #(dfdata['Cumulative P/L'].max() - dfdata['Cumulative P/L'].min()),
-                             sizing="contain",
-                             opacity=0.2,
-                             layer="below")
-                     )
-
-                     #style layout
-                     fig.update_layout(
-                         height=600,
-                         xaxis=dict(
-                             title="Exit Date",
-                             tickmode='array',
-                         ),
-                         yaxis=dict(
-                             title="Cumulative P/L"
-                         ))
-
-                     st.plotly_chart(fig, theme=None, use_container_width=True, height=600)
-                     st.write()
-                     df['Per Trade Return Rate'] = df['Return Per Trade']-1
-
-                     totals = pd.DataFrame([], columns=['# of Trades', 'Wins', 'Losses', 'Win Rate', 'Profit Factor'])
-                     if bot_selections == "Cinnamon Toast" or bot_selections == "Cosmic Cupcake" or bot_selections == "Pure Bread":
-                         data = get_hist_info(df.drop('Drawdown %', axis=1).dropna(), principal_balance, 'Per Trade Return Rate')
-                     else:
-                         data = get_hist_info(df.dropna(), principal_balance, 'Per Trade Return Rate')
-                     totals.loc[len(totals)] = list(i for i in data)
-
-                     totals['Cum. P/L'] = cum_pl-principal_balance
-                     totals['Cum. P/L (%)'] = 100*(cum_pl-principal_balance)/principal_balance
-
-                     if df.empty:
-                         st.error("Oops! None of the data provided matches your selection(s). Please try again.")
-                     else:
-                         with st.container():
-                             for row in totals.itertuples():
-                                 col1, col2, col3, col4 = st.columns(4)
-                                 c1, c2, c3, c4 = st.columns(4)
-                                 with col1:
-                                     st.metric(
-                                         "Total Trades",
-                                         f"{row._1:.0f}",
-                                     )
-                                 with c1:
-                                     st.metric(
-                                         "Profit Factor",
-                                         f"{row._5:.2f}",
-                                     )
-                                 with col2:
-                                     st.metric(
-                                         "Wins",
-                                         f"{row.Wins:.0f}",
-                                     )
-                                 with c2:
-                                     st.metric(
-                                         "Cumulative P/L",
-                                         f"${row._6:.2f}",
-                                         f"{row._7:.2f} %",
-                                     )
-                                 with col3:
-                                     st.metric(
-                                         "Losses",
-                                         f"{row.Losses:.0f}",
-                                     )
-                                 with c3:
-                                     st.metric(
-                                         "Rolling 7 Days",
-                                         "",  #f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
-                                         f"{get_rolling_stats(df, lev, otimeheader, 7):.2f}%",
-                                     )
-                                     st.metric(
-                                         "Rolling 30 Days",
-                                         "",  #f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
-                                         f"{get_rolling_stats(df, lev, otimeheader, 30):.2f}%",
-                                     )
-
-                                 with col4:
-                                     st.metric(
-                                         "Win Rate",
-                                         f"{row._4:.1f}%",
-                                     )
-                                 with c4:
-                                     st.metric(
-                                         "Rolling 90 Days",
-                                         "",  #f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
-                                         f"{get_rolling_stats(df, lev, otimeheader, 90):.2f}%",
-                                     )
-                                     st.metric(
-                                         "Rolling 180 Days",
-                                         "",  #f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
-                                         f"{get_rolling_stats(df, lev, otimeheader, 180):.2f}%",
-                                     )
-
-             if bot_selections == "Cinnamon Toast" and no_errors:
-                 if submitted:
-                     grouped_df = df.groupby('Exit Date').agg({'Signal': 'min', 'Entry Date': 'min', 'Exit Date': 'max', 'Buy Price': 'mean',
-                                                               'Sell Price': 'max',
-                                                               'Net P/L Per Trade': 'mean',
-                                                               'Calculated Return %': lambda x: np.round(100*lev*x.sum(), 2),
-                                                               'DCA': lambda x: int(np.floor(x.max()))})
-                     grouped_df.index = range(1, len(grouped_df)+1)
-                     grouped_df.rename(columns={'DCA': '# of DCAs', 'Buy Price': 'Avg. Buy Price',
-                                                'Net P/L Per Trade': 'Net P/L',
-                                                'Calculated Return %': 'P/L %'}, inplace=True)
-                 else:
-                     dca_map = {1: 25/100, 2: 25/100, 3: 25/100, 4: 25/100, 1.1: 50/100, 2.1: 50/100}
-                     df['DCA %'] = df['DCA'].map(dca_map)
-                     df['Calculated Return %'] = (df['DCA %'])*(1-fees)*((df['Sell Price']-df['Buy Price'])/df['Buy Price'] - fees)  # accounts for fees on open and close of trade
-
-                     grouped_df = df.groupby('Exit Date').agg({'Signal': 'min', 'Entry Date': 'min', 'Exit Date': 'max', 'Buy Price': 'mean',
-                                                               'Sell Price': 'max',
-                                                               'P/L per token': 'mean',
-                                                               'Calculated Return %': lambda x: np.round(100*x.sum(), 2),
-                                                               'DCA': lambda x: int(np.floor(x.max()))})
-                     grouped_df.index = range(1, len(grouped_df)+1)
-                     grouped_df.rename(columns={'DCA': '# of DCAs', 'Buy Price': 'Avg. Buy Price',
-                                                'Calculated Return %': 'P/L %',
-                                                'P/L per token': 'Net P/L'}, inplace=True)
-
-             else:
-                 if submitted and not df.empty:
-                     grouped_df = df.groupby('Exit Date').agg({'Signal': 'min', 'Entry Date': 'min', 'Exit Date': 'max', 'Buy Price': 'mean',
-                                                               'Sell Price': 'max',
-                                                               'Net P/L Per Trade': 'mean',
-                                                               'Calculated Return %': lambda x: np.round(100*lev*x.sum(), 2)})
-                     grouped_df.index = range(1, len(grouped_df)+1)
-                     grouped_df.rename(columns={'Buy Price': 'Avg. Buy Price',
-                                                'Net P/L Per Trade': 'Net P/L',
-                                                'Calculated Return %': 'P/L %'}, inplace=True)
-                 else:
-                     grouped_df = df.groupby('Exit Date').agg({'Signal': 'min', 'Entry Date': 'min', 'Exit Date': 'max', 'Buy Price': 'mean',
-                                                               'Sell Price': 'max',
-                                                               'P/L per token': 'mean',
-                                                               'P/L %': 'mean'})
-                     grouped_df.index = range(1, len(grouped_df)+1)
-                     grouped_df.rename(columns={'Buy Price': 'Avg. Buy Price',
-                                                'P/L per token': 'Net P/L'}, inplace=True)
-             st.subheader("Trade Logs")
-             grouped_df['Entry Date'] = pd.to_datetime(grouped_df['Entry Date'])
-             grouped_df['Exit Date'] = pd.to_datetime(grouped_df['Exit Date'])
-             if bot_selections == "Cosmic Cupcake" or bot_selections == "CT Toasted":
-                 coding = cc_coding if bot_selections == "Cosmic Cupcake" else ctt_coding
-                 st.dataframe(grouped_df.style.format({'Entry Date': '{:%m-%d-%Y %H:%M:%S}', 'Exit Date': '{:%m-%d-%Y %H:%M:%S}', 'Avg. Buy Price': '${:.2f}', 'Sell Price': '${:.2f}', 'Net P/L': '${:.2f}', 'P/L %': '{:.2f}%'})\
-                     .apply(coding, axis=1)\
-                     .applymap(my_style, subset=['Net P/L'])\
-                     .applymap(my_style, subset=['P/L %']), use_container_width=True)
-                 new_title = '<div style="text-align: right;"><span style="background-color:lightgrey;">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span> Not Live Traded</div>'
-                 st.markdown(new_title, unsafe_allow_html=True)
-             elif bot_selections == "Pure Bread":
-                 st.dataframe(grouped_df.style.format({'Entry Date': '{:%m-%d-%Y %H:%M:%S}', 'Exit Date': '{:%m-%d-%Y %H:%M:%S}', 'Avg. Buy Price': '${:.4f}', 'Sell Price': '${:.4f}', 'Net P/L': conditional_formatter, 'P/L %': '{:.2f}%'})\
-                     .applymap(my_style, subset=['Net P/L'])\
-                     .applymap(my_style, subset=['P/L %']), use_container_width=True)
-             else:
-                 st.dataframe(grouped_df.style.format({'Entry Date': '{:%m-%d-%Y %H:%M:%S}', 'Exit Date': '{:%m-%d-%Y %H:%M:%S}', 'Avg. Buy Price': '${:.2f}', 'Sell Price': '${:.2f}', 'Net P/L': '${:.2f}', 'P/L %': '{:.2f}%'})\
-                     .applymap(my_style, subset=['Net P/L'])\
-                     .applymap(my_style, subset=['P/L %']), use_container_width=True)
-
-             # st.subheader("Checking Status")
-             # if submitted:
-             #     st.dataframe(sd_df)
-
- if __name__ == "__main__":
-     st.set_page_config(
-         "Trading Bot Dashboard",
-         layout="wide",
-     )
-     runapp()
- # -
-
-
spaces/BwayKC/darkstorm2150-Protogen_v2.2_Official_Release/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Darkstorm2150-Protogen V2.2 Official Release
- emoji: 💻
- colorFrom: red
- colorTo: gray
- sdk: gradio
- sdk_version: 3.16.0
- app_file: app.py
- pinned: false
- license: openrail
- duplicated_from: jroust/darkstorm2150-Protogen_v2.2_Official_Release
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CAMP-ViL/Xplainer/article.md DELETED
@@ -1,31 +0,0 @@
-
- We propose a new way of explainability for zero-shot diagnosis prediction in the clinical domain. Instead of directly predicting a diagnosis, we prompt the model to classify the existence of descriptive observations that a radiologist would look for on an X-ray scan, and use the descriptor probabilities to estimate the likelihood of a diagnosis, making our model explainable by design. For this, we leverage BioVil, a pretrained CLIP model for X-rays, and apply contrastive observation-based prompting. We evaluate Xplainer on two chest X-ray
- datasets, CheXpert and ChestX-ray14, and demonstrate its effectiveness
- in improving the performance and explainability of zero-shot diagnosis.
- **Authors**: [Chantal Pellegrini][cp], [Matthias Keicher][mk], [Ege Özsoy][eo], [Petra Jiraskova][pj], [Rickmer Braren][rb], [Nassir Navab][nn]
-
- [cp]:https://www.cs.cit.tum.de/camp/members/chantal-pellegrini/
- [eo]:https://www.cs.cit.tum.de/camp/members/ege-oezsoy/
- [mk]:https://www.cs.cit.tum.de/camp/members/matthias-keicher/
- [pj]:https://campus.tum.de/tumonline/ee/ui/ca2/app/desktop/#/pl/ui/$ctx/visitenkarte.show_vcard?$ctx=design=ca2;header=max;lang=de&pPersonenGruppe=3&pPersonenId=46F3A857F258DEE6
- [rb]:https://radiologie.mri.tum.de/de/person/prof-dr-rickmer-f-braren
- [nn]:https://www.cs.cit.tum.de/camp/members/cv-nassir-navab/nassir-navab/
-
- **License**: MIT
-
- **Where to send questions or comments about the model**: Open an issue on the [`Xplainer`](https://github.com/ChantalMP/Xplainer) repo.
-
- **Intended Use**: This model is intended to be used solely for (I) future research on visual-language processing and (II) reproducibility of the experimental results reported in the reference paper.
-
- **Primary intended uses/users**: Vision-Language and CAD researchers
-
-
- ## Citation
- ```bib
- @article{pellegrini2023xplainer,
-     title={Xplainer: From X-Ray Observations to Explainable Zero-Shot Diagnosis},
-     author={Pellegrini, Chantal and Keicher, Matthias and {\"O}zsoy, Ege and Jiraskova, Petra and Braren, Rickmer and Navab, Nassir},
-     journal={arXiv preprint arXiv:2303.13391},
-     year={2023}
- }
- ```
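The card describes the core mechanism: descriptor probabilities from contrastive prompt pairs are aggregated into a diagnosis likelihood. A toy sketch of that idea, with invented descriptor names and probabilities and a simple mean aggregation (the paper's exact aggregation may differ):

```python
import numpy as np

# Hypothetical per-descriptor probabilities for one scan, e.g. obtained by
# scoring "there is X" against "there is no X" with a CLIP-style model.
descriptor_probs = {
    'air bronchograms': 0.71,
    'increased opacity in the lower lobe': 0.64,
    'blurred costophrenic angle': 0.22,
}

# One simple way to turn descriptors into a diagnosis score: average them.
diagnosis_prob = float(np.mean(list(descriptor_probs.values())))
print(f'P(diagnosis) ~ {diagnosis_prob:.2f}')
```

The explainability comes for free: the per-descriptor probabilities are the explanation of the final score.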
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/README.md DELETED
@@ -1,56 +0,0 @@
- <img src=".github/Detectron2-Logo-Horz.svg" width="300" >
-
- Detectron2 is Facebook AI Research's next generation software system
- that implements state-of-the-art object detection algorithms.
- It is a ground-up rewrite of the previous version,
- [Detectron](https://github.com/facebookresearch/Detectron/),
- and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/).
-
- <div align="center">
-   <img src="https://user-images.githubusercontent.com/1381301/66535560-d3422200-eace-11e9-9123-5535d469db19.png"/>
- </div>
-
- ### What's New
- * It is powered by the [PyTorch](https://pytorch.org) deep learning framework.
- * Includes more features such as panoptic segmentation, densepose, Cascade R-CNN, rotated bounding boxes, etc.
- * Can be used as a library to support [different projects](projects/) on top of it.
-   We'll open source more research projects in this way.
- * It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html).
-
- See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/)
- for more demos and to learn about detectron2.
-
- ## Installation
-
- See [INSTALL.md](INSTALL.md).
-
- ## Quick Start
-
- See [GETTING_STARTED.md](GETTING_STARTED.md),
- or the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5).
-
- Learn more at our [documentation](https://detectron2.readthedocs.org).
- And see [projects/](projects/) for some projects that are built on top of detectron2.
-
- ## Model Zoo and Baselines
-
- We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md).
-
-
- ## License
-
- Detectron2 is released under the [Apache 2.0 license](LICENSE).
-
- ## Citing Detectron2
-
- If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry.
-
- ```BibTeX
- @misc{wu2019detectron2,
-   author =       {Yuxin Wu and Alexander Kirillov and Francisco Massa and
-                   Wan-Yen Lo and Ross Girshick},
-   title =        {Detectron2},
-   howpublished = {\url{https://github.com/facebookresearch/detectron2}},
-   year =         {2019}
- }
- ```
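For context on the "used as a library" point: the standard minimal inference path in detectron2 goes through `model_zoo` and `DefaultPredictor`. A short sketch (the image path is a placeholder):

```python
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
# Load a COCO-pretrained Faster R-CNN config and its matching weights.
cfg.merge_from_file(model_zoo.get_config_file(
    'COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    'COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml')
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread('input.jpg'))  # expects a BGR image
print(outputs['instances'].pred_classes)
```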
spaces/CVPR/Text2Human/Text2Human/models/archs/unet_arch.py DELETED
@@ -1,693 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.utils.checkpoint as cp
- from mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer,
-                       build_norm_layer, build_upsample_layer, constant_init,
-                       kaiming_init)
- from mmcv.runner import load_checkpoint
- from mmcv.utils.parrots_wrapper import _BatchNorm
- from mmseg.utils import get_root_logger
-
-
- class UpConvBlock(nn.Module):
-     """Upsample convolution block in decoder for UNet.
-
-     This upsample convolution block consists of one upsample module
-     followed by one convolution block. The upsample module expands the
-     high-level low-resolution feature map and the convolution block fuses
-     the upsampled high-level low-resolution feature map and the low-level
-     high-resolution feature map from encoder.
-
-     Args:
-         conv_block (nn.Sequential): Sequential of convolutional layers.
-         in_channels (int): Number of input channels of the high-level
-             low-resolution feature map from decoder.
-         skip_channels (int): Number of input channels of the low-level
-             high-resolution feature map from encoder.
-         out_channels (int): Number of output channels.
-         num_convs (int): Number of convolutional layers in the conv_block.
-             Default: 2.
-         stride (int): Stride of convolutional layer in conv_block. Default: 1.
-         dilation (int): Dilation rate of convolutional layer in conv_block.
-             Default: 1.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         conv_cfg (dict | None): Config dict for convolution layer.
-             Default: None.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         upsample_cfg (dict): The upsample config of the upsample module in
-             decoder. Default: dict(type='InterpConv'). If the size of the
-             high-level feature map is the same as that of the skip feature map
-             (low-level feature map from encoder), it does not need to upsample
-             the high-level feature map and the upsample_cfg is None.
-         dcn (bool): Use deformable convolution in convolutional layer or not.
-             Default: None.
-         plugins (dict): plugins for convolutional layers. Default: None.
-     """
-
-     def __init__(self,
-                  conv_block,
-                  in_channels,
-                  skip_channels,
-                  out_channels,
-                  num_convs=2,
-                  stride=1,
-                  dilation=1,
-                  with_cp=False,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  upsample_cfg=dict(type='InterpConv'),
-                  dcn=None,
-                  plugins=None):
-         super(UpConvBlock, self).__init__()
-         assert dcn is None, 'Not implemented yet.'
-         assert plugins is None, 'Not implemented yet.'
-
-         self.conv_block = conv_block(
-             in_channels=2 * skip_channels,
-             out_channels=out_channels,
-             num_convs=num_convs,
-             stride=stride,
-             dilation=dilation,
-             with_cp=with_cp,
-             conv_cfg=conv_cfg,
-             norm_cfg=norm_cfg,
-             act_cfg=act_cfg,
-             dcn=None,
-             plugins=None)
-         if upsample_cfg is not None:
-             self.upsample = build_upsample_layer(
-                 cfg=upsample_cfg,
-                 in_channels=in_channels,
-                 out_channels=skip_channels,
-                 with_cp=with_cp,
-                 norm_cfg=norm_cfg,
-                 act_cfg=act_cfg)
-         else:
-             self.upsample = ConvModule(
-                 in_channels,
-                 skip_channels,
-                 kernel_size=1,
-                 stride=1,
-                 padding=0,
-                 conv_cfg=conv_cfg,
-                 norm_cfg=norm_cfg,
-                 act_cfg=act_cfg)
-
-     def forward(self, skip, x):
-         """Forward function."""
-
-         x = self.upsample(x)
-         out = torch.cat([skip, x], dim=1)
-         out = self.conv_block(out)
-
-         return out
-
-
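A quick shape check for `UpConvBlock`, assuming mmcv/mmseg are installed and using `BasicConvBlock` (defined just below) as the fusion block; the channel counts here are illustrative:

```python
import torch

block = UpConvBlock(
    conv_block=BasicConvBlock,
    in_channels=128,    # high-level (low-resolution) channels from decoder
    skip_channels=64,   # encoder skip (high-resolution) channels
    out_channels=64)
skip = torch.randn(1, 64, 32, 32)
x = torch.randn(1, 128, 16, 16)
out = block(skip, x)    # upsample x to 32x32, concat with skip, then convs
print(out.shape)        # torch.Size([1, 64, 32, 32])
```

Note the convention baked into `__init__`: the fusion block always receives `2 * skip_channels`, since the upsample path projects the high-level map down to `skip_channels` before concatenation.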
- class BasicConvBlock(nn.Module):
-     """Basic convolutional block for UNet.
-
-     This module consists of several plain convolutional layers.
-
-     Args:
-         in_channels (int): Number of input channels.
-         out_channels (int): Number of output channels.
-         num_convs (int): Number of convolutional layers. Default: 2.
-         stride (int): Whether to use stride convolution to downsample
-             the input feature map. If stride=2, it only uses stride
-             convolution in the first convolutional layer to downsample the
-             input feature map. Options are 1 or 2. Default: 1.
-         dilation (int): Whether to use dilated convolution to expand the
-             receptive field. It sets the dilation rate of each convolutional
-             layer; the dilation rate of the first convolutional layer is
-             always 1. Default: 1.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         conv_cfg (dict | None): Config dict for convolution layer.
-             Default: None.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         dcn (bool): Use deformable convolution in convolutional layer or not.
-             Default: None.
-         plugins (dict): plugins for convolutional layers. Default: None.
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  num_convs=2,
-                  stride=1,
-                  dilation=1,
-                  with_cp=False,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  dcn=None,
-                  plugins=None):
-         super(BasicConvBlock, self).__init__()
-         assert dcn is None, 'Not implemented yet.'
-         assert plugins is None, 'Not implemented yet.'
-
-         self.with_cp = with_cp
-         convs = []
-         for i in range(num_convs):
-             convs.append(
-                 ConvModule(
-                     in_channels=in_channels if i == 0 else out_channels,
-                     out_channels=out_channels,
-                     kernel_size=3,
-                     stride=stride if i == 0 else 1,
-                     dilation=1 if i == 0 else dilation,
-                     padding=1 if i == 0 else dilation,
-                     conv_cfg=conv_cfg,
-                     norm_cfg=norm_cfg,
-                     act_cfg=act_cfg))
-
-         self.convs = nn.Sequential(*convs)
-
-     def forward(self, x):
-         """Forward function."""
-
-         if self.with_cp and x.requires_grad:
-             out = cp.checkpoint(self.convs, x)
-         else:
-             out = self.convs(x)
-         return out
-
-
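The `with_cp` branch is gradient checkpointing: activations inside `self.convs` are not stored during the forward pass and are recomputed during backward, trading compute for memory. A small sketch of how that engages (note it only triggers when the input requires grad):

```python
import torch

block = BasicConvBlock(in_channels=3, out_channels=64, with_cp=True)
x = torch.randn(2, 3, 64, 64, requires_grad=True)
out = block(x)          # forward uses cp.checkpoint(self.convs, x)
out.sum().backward()    # the convs are re-run here to rebuild activations
```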
- class DeconvModule(nn.Module):
-     """Deconvolution upsample module in decoder for UNet (2X upsample).
-
-     This module uses deconvolution to upsample the feature map in the
-     decoder of UNet.
-
-     Args:
-         in_channels (int): Number of input channels.
-         out_channels (int): Number of output channels.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         kernel_size (int): Kernel size of the convolutional layer. Default: 4.
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  with_cp=False,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  *,
-                  kernel_size=4,
-                  scale_factor=2):
-         super(DeconvModule, self).__init__()
-
-         assert (kernel_size - scale_factor >= 0) and \
-             (kernel_size - scale_factor) % 2 == 0, \
-             f'kernel_size should be greater than or equal to scale_factor ' \
-             f'and (kernel_size - scale_factor) should be even numbers, ' \
-             f'while the kernel size is {kernel_size} and scale_factor is ' \
-             f'{scale_factor}.'
-
-         stride = scale_factor
-         padding = (kernel_size - scale_factor) // 2
-         self.with_cp = with_cp
-         deconv = nn.ConvTranspose2d(
-             in_channels,
-             out_channels,
-             kernel_size=kernel_size,
-             stride=stride,
-             padding=padding)
-
-         norm_name, norm = build_norm_layer(norm_cfg, out_channels)
-         activate = build_activation_layer(act_cfg)
-         self.deconv_upsampling = nn.Sequential(deconv, norm, activate)
-
-     def forward(self, x):
-         """Forward function."""
-
-         if self.with_cp and x.requires_grad:
-             out = cp.checkpoint(self.deconv_upsampling, x)
-         else:
-             out = self.deconv_upsampling(x)
-         return out
-
-
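The assertion and the `padding = (kernel_size - scale_factor) // 2` choice together guarantee an exact integer upsample. Worked through the `ConvTranspose2d` output formula:

```python
# ConvTranspose2d output size: (in - 1) * stride - 2 * padding + kernel_size.
# With the defaults kernel_size=4, stride=scale_factor=2, padding=(4-2)//2=1:
#   (in - 1) * 2 - 2 + 4 = 2 * in, i.e. an exact 2x upsample for any input.
for size in (16, 17, 32):
    out = (size - 1) * 2 - 2 * 1 + 4
    assert out == 2 * size
```

The evenness requirement on `kernel_size - scale_factor` is what makes the padding divide cleanly; an odd difference would leave a one-pixel offset.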
- @UPSAMPLE_LAYERS.register_module()
- class InterpConv(nn.Module):
-     """Interpolation upsample module in decoder for UNet.
-
-     This module uses interpolation to upsample the feature map in the
-     decoder of UNet. It consists of one interpolation upsample layer and one
-     convolutional layer. It can be one interpolation upsample layer followed
-     by one convolutional layer (conv_first=False) or one convolutional layer
-     followed by one interpolation upsample layer (conv_first=True).
-
-     Args:
-         in_channels (int): Number of input channels.
-         out_channels (int): Number of output channels.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         conv_cfg (dict | None): Config dict for convolution layer.
-             Default: None.
-         conv_first (bool): Whether the convolutional layer or the
-             interpolation upsample layer comes first. Default: False, meaning
-             the interpolation upsample layer is followed by one convolutional
-             layer.
-         kernel_size (int): Kernel size of the convolutional layer. Default: 1.
-         stride (int): Stride of the convolutional layer. Default: 1.
-         padding (int): Padding of the convolutional layer. Default: 0.
-         upsample_cfg (dict): Interpolation config of the upsample layer.
-             Default: dict(
-                 scale_factor=2, mode='bilinear', align_corners=False).
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  with_cp=False,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  *,
-                  conv_cfg=None,
-                  conv_first=False,
-                  kernel_size=1,
-                  stride=1,
-                  padding=0,
-                  upsample_cfg=dict(
-                      scale_factor=2, mode='bilinear', align_corners=False)):
-         super(InterpConv, self).__init__()
-
-         self.with_cp = with_cp
-         conv = ConvModule(
-             in_channels,
-             out_channels,
-             kernel_size=kernel_size,
-             stride=stride,
-             padding=padding,
-             conv_cfg=conv_cfg,
-             norm_cfg=norm_cfg,
-             act_cfg=act_cfg)
-         upsample = nn.Upsample(**upsample_cfg)
-         if conv_first:
-             self.interp_upsample = nn.Sequential(conv, upsample)
-         else:
-             self.interp_upsample = nn.Sequential(upsample, conv)
-
-     def forward(self, x):
-         """Forward function."""
-
-         if self.with_cp and x.requires_grad:
-             out = cp.checkpoint(self.interp_upsample, x)
-         else:
-             out = self.interp_upsample(x)
-         return out
-
-
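A minimal usage sketch of `InterpConv`. The `conv_first` flag is mostly a cost knob: with the default `conv_first=False` the 1x1 convolution runs at the upsampled (higher) resolution, while `conv_first=True` reduces channels first, which is cheaper when `in_channels > out_channels`:

```python
import torch

up = InterpConv(in_channels=128, out_channels=64)
x = torch.randn(1, 128, 16, 16)
print(up(x).shape)  # torch.Size([1, 64, 32, 32])
```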
- class UNet(nn.Module):
-     """UNet backbone.
-
-     U-Net: Convolutional Networks for Biomedical Image Segmentation.
-     https://arxiv.org/pdf/1505.04597.pdf
-
-     Args:
-         in_channels (int): Number of input image channels. Default: 3.
-         base_channels (int): Number of base channels of each stage.
-             The output channels of the first stage. Default: 64.
-         num_stages (int): Number of stages in encoder, normally 5. Default: 5.
-         strides (Sequence[int 1 | 2]): Strides of each stage in encoder.
-             len(strides) is equal to num_stages. Normally the stride of the
-             first stage in encoder is 1. If strides[i]=2, it uses stride
-             convolution to downsample in the corresponding encoder stage.
-             Default: (1, 1, 1, 1, 1).
-         enc_num_convs (Sequence[int]): Number of convolutional layers in the
-             convolution block of the corresponding encoder stage.
-             Default: (2, 2, 2, 2, 2).
-         dec_num_convs (Sequence[int]): Number of convolutional layers in the
-             convolution block of the corresponding decoder stage.
-             Default: (2, 2, 2, 2).
-         downsamples (Sequence[int]): Whether to use MaxPool to downsample the
-             feature map after the first stage of the encoder
-             (stages: [1, num_stages)). If the corresponding encoder stage
-             uses stride convolution (strides[i]=2), it will never use MaxPool
-             to downsample, even if downsamples[i-1]=True.
-             Default: (True, True, True, True).
-         enc_dilations (Sequence[int]): Dilation rate of each stage in encoder.
-             Default: (1, 1, 1, 1, 1).
-         dec_dilations (Sequence[int]): Dilation rate of each stage in decoder.
-             Default: (1, 1, 1, 1).
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         conv_cfg (dict | None): Config dict for convolution layer.
-             Default: None.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         upsample_cfg (dict): The upsample config of the upsample module in
-             decoder. Default: dict(type='InterpConv').
-         norm_eval (bool): Whether to set norm layers to eval mode, namely,
-             freeze running stats (mean and var). Note: Effect on Batch Norm
-             and its variants only. Default: False.
-         dcn (bool): Use deformable convolution in convolutional layer or not.
-             Default: None.
-         plugins (dict): plugins for convolutional layers. Default: None.
-
-     Notice:
-         The input image size should be divisible by the whole downsample rate
-         of the encoder. More detail of the whole downsample rate can be found
-         in UNet._check_input_divisible.
-     """
-
-     def __init__(self,
-                  in_channels=3,
-                  base_channels=64,
-                  num_stages=5,
-                  strides=(1, 1, 1, 1, 1),
-                  enc_num_convs=(2, 2, 2, 2, 2),
-                  dec_num_convs=(2, 2, 2, 2),
-                  downsamples=(True, True, True, True),
-                  enc_dilations=(1, 1, 1, 1, 1),
-                  dec_dilations=(1, 1, 1, 1),
-                  with_cp=False,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  upsample_cfg=dict(type='InterpConv'),
-                  norm_eval=False,
-                  dcn=None,
-                  plugins=None):
-         super(UNet, self).__init__()
-         assert dcn is None, 'Not implemented yet.'
-         assert plugins is None, 'Not implemented yet.'
-         assert len(strides) == num_stages, \
-             'The length of strides should be equal to num_stages, ' \
-             f'while the strides is {strides}, the length of ' \
-             f'strides is {len(strides)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(enc_num_convs) == num_stages, \
-             'The length of enc_num_convs should be equal to num_stages, ' \
-             f'while the enc_num_convs is {enc_num_convs}, the length of ' \
-             f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(dec_num_convs) == (num_stages - 1), \
-             'The length of dec_num_convs should be equal to (num_stages-1), ' \
-             f'while the dec_num_convs is {dec_num_convs}, the length of ' \
-             f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(downsamples) == (num_stages - 1), \
-             'The length of downsamples should be equal to (num_stages-1), ' \
-             f'while the downsamples is {downsamples}, the length of ' \
-             f'downsamples is {len(downsamples)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(enc_dilations) == num_stages, \
-             'The length of enc_dilations should be equal to num_stages, ' \
-             f'while the enc_dilations is {enc_dilations}, the length of ' \
-             f'enc_dilations is {len(enc_dilations)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(dec_dilations) == (num_stages - 1), \
-             'The length of dec_dilations should be equal to (num_stages-1), ' \
-             f'while the dec_dilations is {dec_dilations}, the length of ' \
-             f'dec_dilations is {len(dec_dilations)}, and the num_stages is ' \
-             f'{num_stages}.'
-         self.num_stages = num_stages
-         self.strides = strides
-         self.downsamples = downsamples
-         self.norm_eval = norm_eval
-
-         self.encoder = nn.ModuleList()
-         self.decoder = nn.ModuleList()
-
-         for i in range(num_stages):
-             enc_conv_block = []
-             if i != 0:
-                 if strides[i] == 1 and downsamples[i - 1]:
-                     enc_conv_block.append(nn.MaxPool2d(kernel_size=2))
-                 upsample = (strides[i] != 1 or downsamples[i - 1])
-                 self.decoder.append(
-                     UpConvBlock(
-                         conv_block=BasicConvBlock,
-                         in_channels=base_channels * 2**i,
-                         skip_channels=base_channels * 2**(i - 1),
-                         out_channels=base_channels * 2**(i - 1),
-                         num_convs=dec_num_convs[i - 1],
-                         stride=1,
-                         dilation=dec_dilations[i - 1],
-                         with_cp=with_cp,
-                         conv_cfg=conv_cfg,
-                         norm_cfg=norm_cfg,
-                         act_cfg=act_cfg,
-                         upsample_cfg=upsample_cfg if upsample else None,
-                         dcn=None,
-                         plugins=None))
-
-             enc_conv_block.append(
-                 BasicConvBlock(
-                     in_channels=in_channels,
-                     out_channels=base_channels * 2**i,
-                     num_convs=enc_num_convs[i],
-                     stride=strides[i],
-                     dilation=enc_dilations[i],
-                     with_cp=with_cp,
-                     conv_cfg=conv_cfg,
-                     norm_cfg=norm_cfg,
-                     act_cfg=act_cfg,
-                     dcn=None,
-                     plugins=None))
-             self.encoder.append(nn.Sequential(*enc_conv_block))
-             in_channels = base_channels * 2**i
-
-     def forward(self, x):
-         enc_outs = []
-
-         for enc in self.encoder:
-             x = enc(x)
-             enc_outs.append(x)
-         dec_outs = [x]
-         for i in reversed(range(len(self.decoder))):
-             x = self.decoder[i](enc_outs[i], x)
-             dec_outs.append(x)
-
-         return dec_outs
-
-     def init_weights(self, pretrained=None):
-         """Initialize the weights in backbone.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         if isinstance(pretrained, str):
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, strict=False, logger=logger)
-         elif pretrained is None:
-             for m in self.modules():
-                 if isinstance(m, nn.Conv2d):
-                     kaiming_init(m)
-                 elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
-                     constant_init(m, 1)
-         else:
-             raise TypeError('pretrained must be a str or None')
-
-
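A usage sketch for the backbone. With the default config, every stage after the first downsamples by 2, so a 5-stage encoder needs input sizes divisible by 2**4 = 16; `forward` returns the bottleneck feature plus one decoder output per stage, from coarsest to finest:

```python
import torch

net = UNet(in_channels=3, base_channels=64, num_stages=5)
net.init_weights()
outs = net(torch.randn(1, 3, 64, 64))
for o in outs:
    print(o.shape)
# torch.Size([1, 1024, 4, 4])   bottleneck
# torch.Size([1, 512, 8, 8])
# torch.Size([1, 256, 16, 16])
# torch.Size([1, 128, 32, 32])
# torch.Size([1, 64, 64, 64])   full resolution
```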
- class ShapeUNet(nn.Module):
-     """ShapeUNet backbone with small modifications.
-
-     U-Net: Convolutional Networks for Biomedical Image Segmentation.
-     https://arxiv.org/pdf/1505.04597.pdf
-
-     Args:
-         in_channels (int): Number of input image channels. Default: 3.
-         base_channels (int): Number of base channels of each stage.
-             The output channels of the first stage. Default: 64.
-         num_stages (int): Number of stages in encoder, normally 5. Default: 5.
-         attr_embedding (int): Dimension of the attribute embedding that is
-             concatenated to the input of every encoder stage. Default: 128.
-         strides (Sequence[int 1 | 2]): Strides of each stage in encoder.
-             len(strides) is equal to num_stages. Normally the stride of the
-             first stage in encoder is 1. If strides[i]=2, it uses stride
-             convolution to downsample in the corresponding encoder stage.
-             Default: (1, 1, 1, 1, 1).
-         enc_num_convs (Sequence[int]): Number of convolutional layers in the
-             convolution block of the corresponding encoder stage.
-             Default: (2, 2, 2, 2, 2).
-         dec_num_convs (Sequence[int]): Number of convolutional layers in the
-             convolution block of the corresponding decoder stage.
-             Default: (2, 2, 2, 2).
-         downsamples (Sequence[int]): Whether to use MaxPool to downsample the
-             feature map after the first stage of the encoder
-             (stages: [1, num_stages)). If the corresponding encoder stage
-             uses stride convolution (strides[i]=2), it will never use MaxPool
-             to downsample, even if downsamples[i-1]=True.
-             Default: (True, True, True, True).
-         enc_dilations (Sequence[int]): Dilation rate of each stage in encoder.
-             Default: (1, 1, 1, 1, 1).
-         dec_dilations (Sequence[int]): Dilation rate of each stage in decoder.
-             Default: (1, 1, 1, 1).
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed. Default: False.
-         conv_cfg (dict | None): Config dict for convolution layer.
-             Default: None.
-         norm_cfg (dict | None): Config dict for normalization layer.
-             Default: dict(type='BN').
-         act_cfg (dict | None): Config dict for activation layer in ConvModule.
-             Default: dict(type='ReLU').
-         upsample_cfg (dict): The upsample config of the upsample module in
-             decoder. Default: dict(type='InterpConv').
-         norm_eval (bool): Whether to set norm layers to eval mode, namely,
-             freeze running stats (mean and var). Note: Effect on Batch Norm
-             and its variants only. Default: False.
-         dcn (bool): Use deformable convolution in convolutional layer or not.
-             Default: None.
-         plugins (dict): plugins for convolutional layers. Default: None.
-
-     Notice:
-         The input image size should be divisible by the whole downsample rate
-         of the encoder. More detail of the whole downsample rate can be found
-         in UNet._check_input_divisible.
-     """
-
-     def __init__(self,
-                  in_channels=3,
-                  base_channels=64,
-                  num_stages=5,
-                  attr_embedding=128,
-                  strides=(1, 1, 1, 1, 1),
-                  enc_num_convs=(2, 2, 2, 2, 2),
-                  dec_num_convs=(2, 2, 2, 2),
-                  downsamples=(True, True, True, True),
-                  enc_dilations=(1, 1, 1, 1, 1),
-                  dec_dilations=(1, 1, 1, 1),
-                  with_cp=False,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  act_cfg=dict(type='ReLU'),
-                  upsample_cfg=dict(type='InterpConv'),
-                  norm_eval=False,
-                  dcn=None,
-                  plugins=None):
-         super(ShapeUNet, self).__init__()
-         assert dcn is None, 'Not implemented yet.'
-         assert plugins is None, 'Not implemented yet.'
-         assert len(strides) == num_stages, \
-             'The length of strides should be equal to num_stages, ' \
-             f'while the strides is {strides}, the length of ' \
-             f'strides is {len(strides)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(enc_num_convs) == num_stages, \
-             'The length of enc_num_convs should be equal to num_stages, ' \
-             f'while the enc_num_convs is {enc_num_convs}, the length of ' \
-             f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(dec_num_convs) == (num_stages - 1), \
-             'The length of dec_num_convs should be equal to (num_stages-1), ' \
-             f'while the dec_num_convs is {dec_num_convs}, the length of ' \
-             f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(downsamples) == (num_stages - 1), \
-             'The length of downsamples should be equal to (num_stages-1), ' \
-             f'while the downsamples is {downsamples}, the length of ' \
-             f'downsamples is {len(downsamples)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(enc_dilations) == num_stages, \
-             'The length of enc_dilations should be equal to num_stages, ' \
-             f'while the enc_dilations is {enc_dilations}, the length of ' \
-             f'enc_dilations is {len(enc_dilations)}, and the num_stages is ' \
-             f'{num_stages}.'
-         assert len(dec_dilations) == (num_stages - 1), \
-             'The length of dec_dilations should be equal to (num_stages-1), ' \
-             f'while the dec_dilations is {dec_dilations}, the length of ' \
-             f'dec_dilations is {len(dec_dilations)}, and the num_stages is ' \
-             f'{num_stages}.'
-         self.num_stages = num_stages
-         self.strides = strides
-         self.downsamples = downsamples
-         self.norm_eval = norm_eval
-
-         self.encoder = nn.ModuleList()
-         self.decoder = nn.ModuleList()
-
-         for i in range(num_stages):
-             enc_conv_block = []
-             if i != 0:
-                 if strides[i] == 1 and downsamples[i - 1]:
-                     enc_conv_block.append(nn.MaxPool2d(kernel_size=2))
-                 upsample = (strides[i] != 1 or downsamples[i - 1])
-                 self.decoder.append(
-                     UpConvBlock(
-                         conv_block=BasicConvBlock,
-                         in_channels=base_channels * 2**i,
-                         skip_channels=base_channels * 2**(i - 1),
-                         out_channels=base_channels * 2**(i - 1),
-                         num_convs=dec_num_convs[i - 1],
-                         stride=1,
-                         dilation=dec_dilations[i - 1],
-                         with_cp=with_cp,
-                         conv_cfg=conv_cfg,
-                         norm_cfg=norm_cfg,
-                         act_cfg=act_cfg,
-                         upsample_cfg=upsample_cfg if upsample else None,
-                         dcn=None,
-                         plugins=None))
-
-             enc_conv_block.append(
-                 BasicConvBlock(
-                     in_channels=in_channels + attr_embedding,
-                     out_channels=base_channels * 2**i,
-                     num_convs=enc_num_convs[i],
-                     stride=strides[i],
-                     dilation=enc_dilations[i],
-                     with_cp=with_cp,
-                     conv_cfg=conv_cfg,
-                     norm_cfg=norm_cfg,
-                     act_cfg=act_cfg,
-                     dcn=None,
-                     plugins=None))
-             self.encoder.append(nn.Sequential(*enc_conv_block))
-             in_channels = base_channels * 2**i
-
-     def forward(self, x, attr_embedding):
-         enc_outs = []
-         Be, Ce = attr_embedding.size()
-         for enc in self.encoder:
-             _, _, H, W = x.size()
-             emb = attr_embedding.view(Be, Ce, 1, 1).expand(Be, Ce, H, W)
-             x = enc(torch.cat([x, emb], dim=1))
-             enc_outs.append(x)
-         dec_outs = [x]
-         for i in reversed(range(len(self.decoder))):
-             x = self.decoder[i](enc_outs[i], x)
-             dec_outs.append(x)
-
-         return dec_outs
-
-     def init_weights(self, pretrained=None):
-         """Initialize the weights in backbone.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         if isinstance(pretrained, str):
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, strict=False, logger=logger)
-         elif pretrained is None:
-             for m in self.modules():
-                 if isinstance(m, nn.Conv2d):
-                     kaiming_init(m)
-                 elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
-                     constant_init(m, 1)
-         else:
-             raise TypeError('pretrained must be a str or None')
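The only real difference from `UNet` is the conditioning trick in `ShapeUNet.forward`: a per-sample attribute vector is broadcast to the spatial size of the current feature map and concatenated along the channel axis before every encoder stage. The broadcast in isolation:

```python
import torch

B, C, H, W = 2, 3, 32, 32
emb = torch.randn(B, 128)                       # attr_embedding, one vector per sample
x = torch.randn(B, C, H, W)
tiled = emb.view(B, 128, 1, 1).expand(B, 128, H, W)  # no copy, just a broadcast view
x_cond = torch.cat([x, tiled], dim=1)           # (B, C + 128, H, W)
print(x_cond.shape)
```

This is why each encoder stage is built with `in_channels + attr_embedding` input channels in `__init__`.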