parquet-converter committed on
Commit f460016 · 1 Parent(s): 0038684

Update parquet files (step 81 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Captain Sim L 1011 Cracked Fly the L-1011 in Different Scenarios and Weather Conditions.md +0 -112
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberlink ColorDirector Ultra 9.0.2729.0 Activation Code The Ultimate Solution for Color Grading and Correction.md +0 -189
  3. spaces/1line/AutoGPT/autogpt/commands/git_operations.py +0 -26
  4. spaces/1line/AutoGPT/autogpt/speech/gtts.py +0 -22
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bar Council of Maharashtra and Goa Sanad Verification and Renewal Process.md +0 -163
  6. spaces/30Kanika/disease-classifier/app.py +0 -71
  7. spaces/801artistry/RVC801/infer/lib/slicer2.py +0 -260
  8. spaces/AIFILMS/StyleGANEX/models/stylegan2/__init__.py +0 -0
  9. spaces/ALSv/FSW/roop/ui.py +0 -231
  10. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/datasets/__init__.py +0 -0
  11. spaces/Accel/media-converter/README.md +0 -28
  12. spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/evaluator/basic.py +0 -64
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetExpandedChildHeight.js +0 -6
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/Factory.js +0 -13
  15. spaces/Alisonbakers/Fml/README.md +0 -10
  16. spaces/Alpaca233/SadTalker/src/face3d/util/html.py +0 -86
  17. spaces/Amon1/ChatGPTForAcadamic/self_analysis.md +0 -175
  18. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.py +0 -212
  19. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_act.py +0 -32
  20. spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py +0 -69
  21. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/scnet.py +0 -10
  22. spaces/Andy1621/uniformer_image_detection/tools/slurm_train.sh +0 -24
  23. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py +0 -4
  24. spaces/Ankita0512ghosh/Weather_bot/README.md +0 -12
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/three_nn.py +0 -51
  26. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_emoji_codes.py +0 -0
  27. spaces/Axesys/Private-WebUI/app.py +0 -51
  28. spaces/Banbri/zcvzcv/src/app/interface/about/index.tsx +0 -46
  29. spaces/Bart92/RVC_HF/run.sh +0 -61
  30. spaces/Basil2k4/botbasil203/README.md +0 -10
  31. spaces/Benson/text-generation/Examples/Cmo Descargar El Campeonato Mundial De Cricket 2.md +0 -89
  32. spaces/Benson/text-generation/Examples/Cmo Descargar Pokemon Unite En Samsung Tablet.md +0 -46
  33. spaces/Benson/text-generation/Examples/Descargar Fr Leyendas 0.3.0 Mod Apk.md +0 -56
  34. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/transformer/permuter.py +0 -248
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py +0 -294
  36. spaces/Brasd99/SquadDetective/README.md +0 -12
  37. spaces/CAMP-ViL/Xplainer/utils.py +0 -40
  38. spaces/CVPR/LIVE/thrust/thrust/detail/function.h +0 -160
  39. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/uninitialized_fill.h +0 -22
  40. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/internal/copy_cross_system.h +0 -242
  41. spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/htc_mask_head.py +0 -43
  42. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/bot.js +0 -361
  43. spaces/Cosmopolitan/stabilityai-stable-diffusion-2-1/README.md +0 -12
  44. spaces/DHEIVER/Segmento_de_Angio_Coronariana_v3/preprocess.py +0 -13
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/cli.py +0 -198
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/smb.py +0 -309
  47. spaces/DaFujaTyping/second-webui-docker/oh-no.py +0 -14
  48. spaces/DeepLabCut/MegaDetector_DeepLabCut/DLC_models/read.md +0 -3
  49. spaces/DonDoesStuff/openjourney-v4-demo/app.py +0 -3
  50. spaces/DragGan/DragGan/stylegan_human/openpose/src/util.py +0 -95
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Captain Sim L 1011 Cracked Fly the L-1011 in Different Scenarios and Weather Conditions.md DELETED
@@ -1,112 +0,0 @@
1
- <br />
2
- <h1>Captain Sim L 1011 Cracked: A Review of the Legendary TriStar for FSX and P3D</h1>
3
- <p>If you are a fan of classic airliners, you might have heard of Captain Sim L 1011 Cracked, a modification of the original Captain Sim L-1011 TriStar product that allows you to fly this iconic aircraft in your FSX or P3D simulator without paying for it. But what is Captain Sim L 1011 Cracked and why is it so popular among flight sim enthusiasts? In this article, we will review the features, installation, pros and cons, and alternatives of this add-on.</p>
4
- <h2>Features of Captain Sim L 1011 Cracked</h2>
5
- <p>Captain Sim L 1011 Cracked is based on the official product by Captain Sim, a well-known developer of high-quality aircraft add-ons for FSX and P3D. The original product was released in 2017 and included four highly accurate L-1011 variants in 25 paint schemes:</p>
6
- <h2>captain sim l 1011 cracked</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash; <a href="https://byltly.com/2uKzhb">https://byltly.com/2uKzhb</a></b></p><br /><br />
7
- <ul>
8
- <li>L-1011-500 civilian airliner</li>
9
- <li>Royal Air Force TriStar C2 (passenger)</li>
10
- <li>Royal Air Force TriStar K1 (passenger/air-to-air tanker)</li>
11
- <li>Royal Air Force TriStar KC1 (cargo/air-to-air tanker)</li>
12
- </ul>
13
- <p>The add-on also featured an advanced FMS unit based on the Honeywell HT9100 style FMS/CDU, with LNAV, advisory VNAV and SIDs/STARs; a fully functioning Flight Engineer's panel with custom-coded hydraulic, electrical, engine bleed control, fuel and pressurisation systems; new 3D primary instruments for increased readability and smoothness; eight new 2D panels, all easily accessible from a new 2D panel selector; copilot call-outs and cabin announcements for added immersion; numerous improvements to the flight dynamics, including a more realistic DLC (Direct Lift Control) system; a 2D pushback panel for controlling your pushback (distance and angle); a Flight Analysis tool for monitoring and logging your flight parameters; and an authentic sound set from Turbine Sound Studios. The add-on also came with a new comprehensive 100-page manual with flight tutorial and INS guide.</p>
14
- <h2>How to Install Captain Sim L 1011 Cracked</h2>
15
- <p>As you might have guessed, Captain Sim L 1011 Cracked is not an official product by Captain Sim, but rather a cracked version that bypasses the activation process and lets you use the add-on for free. However, this also means that you will not get any support or updates from Captain Sim, and that you might encounter some bugs or glitches that are not present in the original product. Therefore, use this add-on at your own risk and discretion.</p>
16
- <p>To install Captain Sim L 1011 Cracked, you will need to follow these steps:</p>
17
- <ol>
18
- <li>Download the installer from a reliable source. You can find it on various torrent sites or file-sharing platforms. Make sure to scan it for viruses before running it.</li>
19
- <li>Run the installer and follow the instructions. You will need to select your FSX or P3D folder as the destination.</li>
20
- <li>Copy the crack file to the main FSX or P3D folder. The crack file is usually named "csy1110.exe" or something similar. It will replace the original file that checks for activation.</li>
21
- <li>Enjoy flying the TriStar in your simulator. You can find it under "Captain Sim" in your aircraft selection menu.</li>
22
- </ol>
23
- <h2>Pros and Cons of Captain Sim L 1011 Cracked</h2>
24
- <p>Captain Sim L 1011 Cracked has its advantages and disadvantages. Here are some of them:</p>
25
- <h3>Pros:</h3>
26
- <ul>
27
- <li>High quality model: The TriStar is modeled with great attention to detail, both externally and internally. The textures are high resolution and realistic, and the animations are smooth and accurate.</li>
28
- <li>Realistic systems: The add-on simulates most of the systems of the real aircraft, including the FMS, INS, FE panel, hydraulic, electrical, fuel, pressurisation, DLC, etc. You will need to follow the procedures and checklists to operate them correctly.</li>
29
- <li>Immersive sound: The sound set by Turbine Sound Studios adds a lot of realism and atmosphere to your flights. You will hear the distinctive roar of the RB211 engines, as well as various cockpit noises, warnings, switches, etc.</li>
30
- <li>Detailed manual: The add-on comes with a comprehensive manual that covers everything you need to know about flying the TriStar. It includes a history of the aircraft, a description of its systems, a flight tutorial, an INS guide, performance charts, checklists, etc.</li>
31
- </ul>
32
- <h3>Cons:</h3>
33
- <ul>
34
- <li>High system requirements: The add-on is quite demanding on your computer resources. You will need a powerful PC to run it smoothly at high settings. You might experience some stutters or low FPS if your system is not up to par.</li>
35
- <li>Some bugs and glitches: As mentioned before, this is not an official product by Captain Sim, so you might encounter some issues that are not present in the original product. For example, some users have reported problems with landing lights not working properly, autopilot not following altitude constraints correctly, etc.</li>
36
- <li>No support from Captain Sim: Since this is a cracked version of their product, you will not get any support or updates from Captain Sim. If you have any questions or problems with the add-on, you will have to rely on other users or forums for help.</li>
37
- </ul>
38
- <h2>Alternatives to Captain Sim L 1011 Cracked</h2>
39
- <p>If you are looking for other options to fly the TriStar in your simulator, here are some alternatives:</p>
40
- <p>captain sim l 1011 download free<br />
41
- captain sim l 1011 tristar fsx<br />
42
- captain sim l 1011 p3d v4<br />
43
- captain sim l 1011 liveries<br />
44
- captain sim l 1011 review<br />
45
- captain sim l 1011 tutorial<br />
46
- captain sim l 1011 manual<br />
47
- captain sim l 1011 repaints<br />
48
- captain sim l 1011 cold and dark<br />
49
- captain sim l 1011 checklist<br />
50
- captain sim l 1011 sound pack<br />
51
- captain sim l 1011 vc upgrade<br />
52
- captain sim l 1011 sp2.2<br />
53
- captain sim l 1011 engine start<br />
54
- captain sim l 1011 flight plan<br />
55
- captain sim l 1011 autopilot<br />
56
- captain sim l 1011 fuel planner<br />
57
- captain sim l 1011 fmc update<br />
58
- captain sim l 1011 panel fix<br />
59
- captain sim l 1011 landing gear<br />
60
- captain sim l 1011 cockpit view<br />
61
- captain sim l 1011 takeoff speed<br />
62
- captain sim l 1011 navigation system<br />
63
- captain sim l 1011 fs2004<br />
64
- captain sim l 1011 fs9<br />
65
- captain sim l 1011 x plane 11<br />
66
- captain sim l 1011 prepar3d v5<br />
67
- captain sim l 1011 steam edition<br />
68
- captain sim l 1011 serial number<br />
69
- captain sim l 1011 activation key<br />
70
- captain sim l 1011 windows 10<br />
71
- captain sim l 1011 system requirements<br />
72
- captain sim l 1011 vs just flight<br />
73
- captain sim l 1011 vs cs classic pro line tristar<br />
74
- captain sim l 1011 vs lockheed martin tristar redux<br />
75
- captain sim l 1011 delta airlines<br />
76
- captain sim l 1011 british airways<br />
77
- captain sim l 1011 eastern airlines<br />
78
- captain sim l 1011 air canada<br />
79
- captain sim l 1011 cathay pacific<br />
80
- captain sim l 1011 air transat<br />
81
- captain sim l 1011 air india<br />
82
- captain sim l 1011 pan am<br />
83
- captain sim l 1011 twa<br />
84
- captain sim l 1011 united airlines<br />
85
- captain sim l 1011 american airlines<br />
86
- captain sim l 1011 fedex express <br />
87
- captain sim l 1011 orbis flying eye hospital <br />
88
- captain sim l 1011 royal air force <br />
89
- captain sim l 1011 royal saudi air force</p>
90
- <h3>Just Flight L-1011 TriStar Professional</h3>
91
- <p>This is another high-quality add-on by Just Flight that features three variants of the TriStar (L-1011-500 civilian airliner; RAF C2 passenger transport; RAF K1 passenger/tanker) with different engine options (RB211-524B4I or RB211-524B4). It also includes an advanced FMS unit with SID/STAR capability; custom-coded systems such as fuel management; realistic flight dynamics; copilot call-outs; authentic sound set; detailed animations; interactive checklists; comprehensive manual; etc. It costs $49.99 USD on Just Flight's website.</p>
92
- <h3>Historic Jetliners Group L-1011 TriStar</h3>
93
- # Conclusion <h1>Conclusion</h1>
94
- <p>Captain Sim L 1011 Cracked is a modification of the original Captain Sim L-1011 TriStar product that allows you to fly this legendary aircraft in your FSX or P3D simulator for free. It has many features that make it a realistic and immersive add-on, such as high quality model, realistic systems, immersive sound, detailed manual, etc. However, it also has some drawbacks, such as high system requirements, some bugs and glitches, no support from Captain Sim, etc. Therefore, you should use this add-on at your own risk and discretion.</p>
95
- <p>If you are looking for other options to fly the TriStar in your simulator, you can try Just Flight L-1011 TriStar Professional or Historic Jetliners Group L-1011 TriStar. Both are high-quality add-ons that offer different variants of the TriStar with different engine options. Just Flight's product is more advanced and comprehensive, but also more expensive. Historic Jetliners Group's product is more simple and basic, but also free.</p>
96
- <p>Whichever add-on you choose, you will enjoy flying the TriStar in your simulator. It is a unique and challenging aircraft that will test your skills and knowledge. It is also a piece of aviation history that deserves to be remembered and appreciated.</p>
97
- # FAQs <h2>FAQs</h2>
98
- <ul>
99
- <li><b>Q: What is the difference between the L-1011-500 and the other variants?</b></li>
100
- <li>A: The L-1011-500 is a shortened and longer-range version of the original L-1011. It has a fuselage reduced in length by four metres, an increased fuel capacity, and a more powerful variant of the RB211 engine.</li>
101
- <li><b>Q: What is the DLC system and how does it work?</b></li>
102
- <li>A: The DLC system stands for Direct Lift Control. It is a system that allows the pilot to control the lift of the aircraft by using spoilers on the wings. It helps to maintain a constant glide slope during approach and landing without changing the pitch attitude or thrust.</li>
103
- <li><b>Q: How do I use the FMS unit in the add-on?</b></li>
104
- <li>A: The FMS unit is based on the Honeywell HT9100 style FMS/CDU. You can use it to enter your flight plan, select SIDs/STARs, activate LNAV and VNAV modes, etc. You can find more information on how to use it in the manual.</li>
105
- <li><b>Q: How do I use the INS unit in the add-on?</b></li>
106
- <li>A: The INS unit stands for Inertial Navigation System. It is a system that uses gyroscopes and accelerometers to determine the position and velocity of the aircraft. You can use it to enter your waypoints, align your system, update your position, etc. You can find more information on how to use it in the manual.</li>
107
- <li><b>Q: How do I get support or updates for Captain Sim L 1011 Cracked?</b></li>
108
- <li>A: You don't. Captain Sim L 1011 Cracked is not an official product by Captain Sim, so you will not get any support or updates from them. If you have any questions or problems with the add-on, you will have to rely on other users or forums for help.</li>
109
- </ul>
110
- </p> 0a6ba089eb<br />
111
- <br />
112
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberlink ColorDirector Ultra 9.0.2729.0 Activation Code The Ultimate Solution for Color Grading and Correction.md DELETED
@@ -1,189 +0,0 @@
1
- <br />
2
- <h1>Cyberlink ColorDirector Ultra 9 Activation Code: A Complete Guide</h1>
3
- <p>If you are looking for a powerful and easy-to-use video editing software that can help you enhance the colors and mood of your videos, then you might want to check out Cyberlink ColorDirector Ultra 9.</p>
4
- <h2>Cyberlink ColorDirector Ultra 9.0.2729.0 Activation Code</h2><br /><p><b><b>Download Zip</b> - <a href="https://byltly.com/2uKz55">https://byltly.com/2uKz55</a></b></p><br /><br />
5
- <p>Cyberlink ColorDirector Ultra 9 is the latest version of the popular color grading software from Cyberlink, a leading company in multimedia software solutions.</p>
6
- <p>In this article, we will provide you with a complete guide on how to download, install, activate, and use Cyberlink ColorDirector Ultra 9 with the activation code.</p>
7
- <p>We will also cover some of the features and benefits of this software, as well as some tips and tricks to avoid any activation errors or issues.</p>
8
- <p>How to get Cyberlink ColorDirector Ultra 9.0.2729.0 for free<br />
9
- Cyberlink ColorDirector Ultra 9.0.2729.0 crack download<br />
10
- Cyberlink ColorDirector Ultra 9.0.2729.0 serial key generator<br />
11
- Cyberlink ColorDirector Ultra 9.0.2729.0 license key online<br />
12
- Cyberlink ColorDirector Ultra 9.0.2729.0 full version with key<br />
13
- Cyberlink ColorDirector Ultra 9.0.2729.0 product key finder<br />
14
- Cyberlink ColorDirector Ultra 9.0.2729.0 activation code giveaway<br />
15
- Cyberlink ColorDirector Ultra 9.0.2729.0 registration code free<br />
16
- Cyberlink ColorDirector Ultra 9.0.2729.0 patch file download<br />
17
- Cyberlink ColorDirector Ultra 9.0.2729.0 keygen software<br />
18
- Cyberlink ColorDirector Ultra 9 review and features<br />
19
- Cyberlink ColorDirector Ultra 9 tutorial and tips<br />
20
- Cyberlink ColorDirector Ultra 9 system requirements and compatibility<br />
21
- Cyberlink ColorDirector Ultra 9 discount and coupon code<br />
22
- Cyberlink ColorDirector Ultra 9 upgrade and update<br />
23
- Cyberlink ColorDirector Ultra 9 alternatives and competitors<br />
24
- Cyberlink ColorDirector Ultra 9 pros and cons<br />
25
- Cyberlink ColorDirector Ultra 9 vs Adobe Premiere Pro CC<br />
26
- Cyberlink ColorDirector Ultra 9 vs DaVinci Resolve Studio<br />
27
- Cyberlink ColorDirector Ultra 9 vs Filmora X<br />
28
- Cyberlink ColorDirector Ultra 9 best settings and presets<br />
29
- Cyberlink ColorDirector Ultra 9 color grading and correction<br />
30
- Cyberlink ColorDirector Ultra 9 motion tracking and stabilization<br />
31
- Cyberlink ColorDirector Ultra 9 split toning and HDR effect<br />
32
- Cyberlink ColorDirector Ultra 9 noise reduction and sharpening<br />
33
- Cyberlink ColorDirector Ultra 9 LUTs and filters download<br />
34
- Cyberlink ColorDirector Ultra 9 chroma key and green screen<br />
35
- Cyberlink ColorDirector Ultra 9 mask and blend modes<br />
36
- Cyberlink ColorDirector Ultra 9 crop and zoom tools<br />
37
- Cyberlink ColorDirector Ultra 9 timeline and keyframes<br />
38
- Cyberlink ColorDirector Ultra 9 export and render settings<br />
39
- Cyberlink ColorDirector Ultra 9 supported formats and codecs<br />
40
- Cyberlink ColorDirector Ultra 9 keyboard shortcuts and hotkeys<br />
41
- Cyberlink ColorDirector Ultra 9 troubleshooting and error fix<br />
42
- Cyberlink ColorDirector Ultra 9 customer support and feedback<br />
43
- How to uninstall Cyberlink ColorDirector Ultra 9 from Windows PC<br />
44
- How to install Cyberlink ColorDirector Ultra 9 on Mac OS X<br />
45
- How to transfer Cyberlink ColorDirector Ultra 9 license to another computer<br />
46
- How to activate Cyberlink ColorDirector Ultra 9 offline mode<br />
47
- How to deactivate Cyberlink ColorDirector Ultra 9 online mode<br />
48
- How to extend Cyberlink ColorDirector Ultra 9 trial period<br />
49
- How to renew Cyberlink ColorDirector Ultra 9 subscription plan<br />
50
- How to cancel Cyberlink ColorDirector Ultra 9 auto-renewal option<br />
51
- How to refund Cyberlink ColorDirector Ultra 9 purchase order<br />
52
- How to verify Cyberlink ColorDirector Ultra 9 email address</p>
53
- <p>So, without further ado, let's get started!</p>
54
- <h2>What is Cyberlink ColorDirector Ultra 9?</h2>
55
- <p>Cyberlink ColorDirector Ultra 9 is a video editing software that allows you to color grade your videos with professional tools and presets.</p>
56
- <p>Color grading is the process of adjusting the colors, contrast, brightness, saturation, hue, and tone of a video to create a specific mood or atmosphere.</p>
57
- <p>Color grading can make a huge difference in the quality and appeal of your videos, as it can enhance the emotions, storytelling, and aesthetics of your footage.</p>
58
- <h3>Features and benefits of Cyberlink ColorDirector Ultra 9</h3>
59
- <p>Some of the features and benefits of Cyberlink ColorDirector Ultra 9 are:</p>
60
- <ul>
61
- <li>It supports a wide range of video formats, including MP4, MOV, AVI, MKV, HEVC, etc.</li>
62
- <li>It has a user-friendly interface that is easy to navigate and customize.</li>
63
- <li>It has a powerful color engine that can handle high-resolution videos up to 4K.</li>
64
- <li>It has a variety of color grading tools and presets that can help you achieve any look or style you want.</li>
65
- <li>It has a split toning tool that can help you apply different colors to the shadows and highlights of your video.</li>
66
- <li>It has a color shift tool that can help you change the hue or tint of your video.</li>
67
- <li>It has a color match tool that can help you match the colors of different clips or scenes.</li>
68
- <li>It has a color splash tool that can help you isolate a specific color or object in your video.</li>
69
- <li>It has a motion tracking tool that can help you apply color effects to moving objects or subjects in your video.</li>
70
- <li>It has a keyframe control tool that can help you adjust the intensity or duration of your color effects over time.</li>
71
- <li>It has a global adjustment tool that can help you apply color corrections or enhancements to your entire video.</li>
72
- <li>It has a regional adjustment tool that can help you apply color corrections or enhancements to specific areas or regions in your video.</li>
73
- <li>It has an HDR effect tool that can help you create stunning high dynamic range videos with enhanced details and contrast.</li>
74
- <li>It has an AI style transfer tool that can help you apply artistic styles or filters to your video based on famous paintings or photos.</li>
75
- <li>It has an export and share tool that can help you save or upload your videos to various platforms or devices.</li>
76
- </ul>
77
- <h3>System requirements and compatibility of Cyberlink ColorDirector Ultra 9</h3>
78
- <p>To run Cyberlink ColorDirector Ultra 9 smoothly on your computer, you need to meet the following system requirements:</p>
79
- <table>
80
- <tr><th>Operating system</th><th>Windows 10/8/7 (64-bit only)</th></tr>
81
- <tr><td>CPU</td><td>Intel Core i-series or AMD Phenom II or above</td></tr>
82
- <tr><td>RAM</td><td>4 GB (6 GB recommended)</td></tr>
83
- <tr><td>HDD</td><td>1 GB for installation (10 GB recommended)</td></tr>
84
- <tr><td>GPU</td><td>NVIDIA GeForce GTX/RTX series or AMD Radeon RX series or above (with at least 2 GB VRAM)</td></tr>
85
- <tr><td>Screen resolution</td><td>1024 x 768 (1920 x 1080 recommended)</td></tr>
86
- <tr><td>Internet connection</td><td>Required for activation and updates</td></tr>
87
- </table>
88
- <p>Cyberlink ColorDirector Ultra 9 is compatible with most video editing software, such as Adobe Premiere Pro, Adobe After Effects, Final Cut Pro X, DaVinci Resolve, etc.</p>
89
- <p>You can use it as a standalone application or as a plugin for these software.</p>
90
- <h2>How to download and install Cyberlink ColorDirector Ultra 9?</h2>
91
- <p>To download and install Cyberlink ColorDirector Ultra 9 on your computer, you need to follow these steps:</p>
92
- <h3>Steps to download Cyberlink ColorDirector Ultra 9</h3>
93
- <ol>
94
- <li>Go to the official website of Cyberlink at <a href="https://www.cyberlink.com/">https://www.cyberlink.com/</a>.</li>
95
- <li>Navigate to the Products tab and select Video Editing from the drop-down menu.</li>
96
- <li>Select ColorDirector from the list of products and click on Free Download.</li>
97
- <li>You will be redirected to another page where you need to enter your name and email address to get the download link.</li>
98
- <li>You will receive an email from Cyberlink with the download link and instructions for installation.</li>
99
- <li>Click on the download link in the email and save the file on your computer.</li>
100
- </ol>
101
- <h3>Steps to install Cyberlink ColorDirector Ultra 9</h3>
102
- <ol>
103
- <li>Locate the downloaded file on your computer and double-click on it to launch the installer.</li>
104
- <li>Select your preferred language and click on OK.</li>
105
- <li>Acknowledge the license agreement and click on Next.</li>
106
- <li>Select your destination folder for installation and click on Next.</li>
107
- <li>Select whether you want to create desktop shortcuts or not and click on Next.</li>
108
- <li>Select whether you want to install additional components or not and click on Next.</li>
109
- <li>The installation process will begin and may take several minutes depending on your system configuration.</li>
110
- <li>Once the installation is complete, click on Finish.</li>
111
- </ol>
112
- <h2>How to activate Cyberlink ColorDirector Ultra 9?</h2>
113
- <p>To activate Cyberlink ColorDirector Ultra 9 with the activation code, you need to follow these steps:</p>
114
- <h3>Steps to activate Cyberlink ColorDirector Ultra 9 with the activation code</h3>
115
- <ol>
116
- <li>Lunch Cyberlink Director Ultra 9 from your desktop or start menu.</li>
117
- <li>Click on the Activate button on the top right corner of the main window.</li>
118
- <li>Enter your activation code in the pop-up window and click on OK. You can find your activation code in the order confirmation email that you received from Cyberlink or in the product box if you purchased a physical copy.</li>
119
- <li>Wait for the activation process to complete and click on OK.</li>
120
- <li>You have successfully activated Cyberlink ColorDirector Ultra 9 with the activation code and you can enjoy all the features and benefits of this software.</li>
121
- </ol>
122
- <h3>Tips and tricks to avoid activation errors and issues</h3>
123
- <p>Some tips and tricks to avoid activation errors and issues are:</p>
124
- <ul>
125
- <li>Make sure you have a stable internet connection during the activation process.</li>
126
- <li>Make sure you enter the activation code correctly and without any spaces or dashes.</li>
127
- <li>Make sure you use the same email address that you used to purchase or register Cyberlink ColorDirector Ultra 9.</li>
128
- <li>Make sure you do not activate Cyberlink ColorDirector Ultra 9 on more than one computer with the same activation code. If you need to activate it on another computer, you need to deactivate it from the previous one first.</li>
129
- <li>If you encounter any activation errors or issues, you can contact Cyberlink customer support for assistance.</li>
130
- </ul>
131
- <h2>How to use Cyberlink ColorDirector Ultra 9?</h2>
132
- <p>To use Cyberlink ColorDirector Ultra 9 to color grade your videos, you need to follow these steps:</p>
133
- <h3>A brief overview of the user interface and tools of Cyberlink ColorDirector Ultra 9</h3>
134
- <p>The user interface of Cyberlink ColorDirector Ultra 9 consists of four main sections:</p>
135
- <ul>
136
- <li>The media library, where you can import, organize, and preview your video clips.</li>
137
- <li>The timeline, where you can arrange, trim, and edit your video clips.</li>
138
- <li>The preview window, where you can view and play back your video clips.</li>
139
- <li>The adjustment panel, where you can access and apply various color grading tools and presets.</li>
140
- </ul>
141
- <p>You can customize the user interface by resizing, docking, undocking, or hiding any of these sections according to your preference.</p>
142
- <p>You can also access other features and settings from the menu bar or the toolbar at the top of the main window.</p>
143
- <h3>A simple tutorial on how to color grade a video with Cyberlink ColorDirector Ultra 9</h3>
144
- <ol>
145
- <li>Import your video clip into the media library by clicking on the Import Media button or by dragging and dropping it from your computer.</li>
146
- <li>Drag and drop your video clip from the media library to the timeline.</li>
147
- <li>Select your video clip on the timeline and click on the Adjustment button on the toolbar to open the adjustment panel.</li>
148
- <li>In the adjustment panel, you can choose from various color grading tools and presets to apply to your video clip. You can also adjust the parameters of each tool or preset by using the sliders or entering values manually.</li>
149
- <li>Some of the color grading tools and presets that you can use are:</li>
150
- <ul>
151
- <li>Global Adjustment: This tool allows you to apply color corrections or enhancements to your entire video clip. You can adjust parameters such as exposure, contrast, white balance, saturation, vibrance, etc.</li>
152
- <li>Regional Adjustment: This tool allows you to apply color corrections or enhancements to specific areas or regions in your video clip. You can use masks or selection tools to define the areas or regions that you want to adjust. You can also adjust parameters such as exposure, contrast, white balance, saturation, vibrance, etc. for each region separately.</li>
153
- <li>HDR Effect: This tool allows you to create stunning high dynamic range videos with enhanced details and contrast. You can adjust parameters such as strength, radius, tone compression, etc.</li>
154
- <li>AI Style Transfer: This tool allows you to apply artistic styles or filters to your video clip based on famous paintings or photos. You can choose from various styles or filters such as impressionist, expressionist, abstract, etc. You can also adjust parameters such as strength, detail preservation, etc.</li>
155
- <li>Split Toning: This tool allows you to apply different colors to the shadows and highlights of your video. You can choose from various presets or customize your own colors.</li>
156
- <li>Color Shift: This tool allows you to change the hue or tint of your video. You can use the color wheel or the sliders to adjust the hue, saturation, and lightness of your video.</li>
157
- <li>Color Match: This tool allows you to match the colors of different clips or scenes. You can use a reference clip or an image to set the target colors for your video.</li>
158
- <li>Color Splash: This tool allows you to isolate a specific color or object in your video. You can use the brush tool or the selection tool to define the area that you want to keep in color while the rest of the video is in black and white.</li>
159
- <li>Motion Tracking: This tool allows you to apply color effects to moving objects or subjects in your video. You can use the tracking tool to select and track the object or subject that you want to apply the color effect to.</li>
160
- <li>Keyframe Control: This tool allows you to adjust the intensity or duration of your color effects over time. You can use the keyframe timeline to add, delete, or modify keyframes for each color effect.</li>
161
- </ul>
162
- <li>As you apply and adjust the color grading tools and presets, you can preview the results in the preview window. You can also use the split view or side-by-side view to compare the original and edited videos.</li>
163
- <li>Once you are satisfied with your color grading, you can export and share your video by clicking on the Export button on the toolbar. You can choose from various formats, resolutions, quality settings, and destinations for your video.</li>
164
- </ol>
165
- <h2>Conclusion</h2>
166
- <p>Cyberlink ColorDirector Ultra 9 is a powerful and easy-to-use video editing software that can help you enhance the colors and mood of your videos with professional tools and presets.</p>
167
- <p>With Cyberlink ColorDirector Ultra 9, you can color grade your videos with ease and creativity, and achieve any look or style you want.</p>
168
- <p>Whether you are a beginner or a pro, Cyberlink ColorDirector Ultra 9 can help you take your videos to the next level with stunning color effects.</p>
169
- <h3>A call to action and a link to the official website of Cyberlink ColorDirector Ultra 9</h3>
170
- <p>If you are interested in trying out Cyberlink ColorDirector Ultra 9 for yourself, you can download a free trial version from the official website of Cyberlink at <a href="https://www.cyberlink.com/products/colordirector-ultra/features_en_US.html">https://www.cyberlink.com/products/colordirector-ultra/features_en_US.html</a>.</p>
171
- <p>If you are ready to buy Cyberlink ColorDirector Ultra 9, you can also get it from the official website of Cyberlink at a discounted price for a limited time.</p>
172
- <p>Don't miss this opportunity to get Cyberlink ColorDirector Ultra 9 with the activation code and start color grading your videos like a pro!</p>
173
- <h2>FAQs</h2>
174
- <p>Here are some frequently asked questions about Cyberlink ColorDirector Ultra 9 and their answers:</p>
175
- <ol>
176
- <li><b>What is the difference between Cyberlink ColorDirector Ultra 9 and Cyberlink PowerDirector?</b></li>
177
- <p>Cyberlink ColorDirector Ultra 9 is a specialized software for color grading videos, while Cyberlink PowerDirector is a comprehensive software for video editing and production. You can use Cyberlink ColorDirector Ultra 9 as a standalone application or as a plugin for Cyberlink PowerDirector.</p>
178
- <li><b>How do I get updates for Cyberlink ColorDirector Ultra 9?</b></li>
179
- <p>You can get updates for Cyberlink ColorDirector Ultra 9 by clicking on the Update button on the toolbar or by going to Help > Check for Updates. You need an internet connection to download and install updates.</p>
180
- <li><b>How do I contact customer support for Cyberlink ColorDirector Ultra 9?</b></li>
181
- <p>You can contact customer support for Cyberlink ColorDirector Ultra 9 by going to Help > Contact Customer Support. You can also visit <a href="https://www.cyberlink.com/support/index.html">https://www.cyberlink.com/support/index.html</a> for more information and resources.</p>
182
- <li><b>How do I deactivate Cyberlink ColorDirector Ultra 9 from one computer and activate it on another?</b></li>
183
- <p>You can deactivate Cyberlink ColorDirector Ultra 9 from one computer by going to Help > Deactivate. You can then activate it on another computer by following the steps mentioned above in this article.</p>
184
- <li><b>How do I import and export LUTs in Cyberlink ColorDirector Ultra 9?</b></li>
185
- <p>You can import and export LUTs in Cyberlink ColorDirector Ultra 9 by going to Adjustment > LUTs > Import/Export. You can import LUTs in .cube format and export LUTs in .cdl format.</p>
186
- </ol>
187
- </p> 0a6ba089eb<br />
188
- <br />
189
- <br />
spaces/1line/AutoGPT/autogpt/commands/git_operations.py DELETED
@@ -1,26 +0,0 @@
1
- """Git operations for autogpt"""
2
- import git
3
-
4
- from autogpt.config import Config
5
- from autogpt.workspace import path_in_workspace
6
-
7
- CFG = Config()
8
-
9
-
10
- def clone_repository(repo_url: str, clone_path: str) -> str:
11
- """Clone a GitHub repository locally
12
-
13
- Args:
14
- repo_url (str): The URL of the repository to clone
15
- clone_path (str): The path to clone the repository to
16
-
17
- Returns:
18
- str: The result of the clone operation"""
19
- split_url = repo_url.split("//")
20
- auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
21
- safe_clone_path = path_in_workspace(clone_path)
22
- try:
23
- git.Repo.clone_from(auth_repo_url, safe_clone_path)
24
- return f"""Cloned {repo_url} to {safe_clone_path}"""
25
- except Exception as e:
26
- return f"Error: {str(e)}"
spaces/1line/AutoGPT/autogpt/speech/gtts.py DELETED
@@ -1,22 +0,0 @@
1
- """ GTTS Voice. """
2
- import os
3
-
4
- import gtts
5
- from playsound import playsound
6
-
7
- from autogpt.speech.base import VoiceBase
8
-
9
-
10
- class GTTSVoice(VoiceBase):
11
- """GTTS Voice."""
12
-
13
- def _setup(self) -> None:
14
- pass
15
-
16
- def _speech(self, text: str, _: int = 0) -> bool:
17
- """Play the given text."""
18
- tts = gtts.gTTS(text)
19
- tts.save("speech.mp3")
20
- playsound("speech.mp3", True)
21
- os.remove("speech.mp3")
22
- return True
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bar Council of Maharashtra and Goa Sanad Verification and Renewal Process.md DELETED
@@ -1,163 +0,0 @@
1
- <br />
2
- <h1>Bar Council of Maharashtra and Goa Sanad Download</h1>
3
- <p>If you are a law graduate who wants to practice as an advocate in Maharashtra or Goa, you need to enroll yourself with the Bar Council of Maharashtra and Goa (BCMG) and obtain a sanad. A sanad is a license or certificate that grants you the right to practice law in the courts and tribunals in these states. In this article, we will explain what is BCMG, what is sanad, how to apply for sanad online or offline, and what are the benefits of online application over offline application.</p>
4
- <h2>What is the Bar Council of Maharashtra and Goa (BCMG)?</h2>
5
- <p>The Bar Council of Maharashtra and Goa (BCMG) is a statutory body constituted under the Advocates Act, 1961 for the states of Maharashtra and Goa, and the union territories of Dadra and Nagar Haveli and Daman and Diu. The BCMG has various functions such as:</p>
6
- <h2>bar council of maharashtra and goa sanad download</h2><br /><p><b><b>Download Zip</b> &#10145; <a href="https://urlin.us/2uT0t4">https://urlin.us/2uT0t4</a></b></p><br /><br />
7
- <ul>
8
- <li>Enrolling advocates on its roll and granting them sanad</li>
9
- <li>Regulating and maintaining the standards of professional conduct and etiquette for advocates</li>
10
- <li>Exercising disciplinary jurisdiction over advocates on its roll</li>
11
- <li>Promoting legal education and law reforms</li>
12
- <li>Conducting seminars, workshops, conferences, and publications on legal topics</li>
13
- <li>Organizing legal aid and welfare schemes for advocates</li>
14
- </ul>
15
- <p>The BCMG is represented by 25 elected members from its territory, one member from each state's advocate general, and one member from the Bar Council of India. The BCMG elects its chairman, vice-chairman, secretary, treasurer, and various committees to carry out its functions. The BCMG has its office at High Court Extension Building, Fort, Mumbai. You can visit its official website [here](^3^) for more information.</p>
16
- <h2>What is sanad and why is it important for advocates?</h2>
17
- <p>Sanad is a Hindi word that means a deed, charter, warrant, patent, or diploma. In legal context, it means a license or certificate that authorizes an advocate to practice law in a particular jurisdiction. Sanad is issued by the state bar council where an advocate intends to practice. Sanad contains details such as name, enrollment number, date of enrollment, address, photograph, signature, seal, etc. of an advocate.</p>
18
- <p>Sanad is important for advocates because:</p>
19
- <ul>
20
- <li>It is a proof of identity and qualification as an advocate</li>
21
- <li>It is a prerequisite for appearing before any court or tribunal as an advocate</li>
22
- <li>It is a condition for becoming a member of any bar association or law firm</li>
23
- <li>It is a requirement for participating in any bar council election or activity</li>
24
- <li>It is a duty to uphold the dignity and integrity of the legal profession</li>
25
- </ul>
26
- <h2>How to apply for sanad online or offline?</h2>
27
- <p>An advocate can apply for sanad online or offline by following the respective procedures. Let us see the details of each procedure below.</p>
28
- <h3>Online application procedure</h3>
29
- <p>If you want to apply for sanad online, you need to meet the following eligibility criteria and have the following documents ready:</p>
30
- <ul>
31
- <li>You must be a citizen of India and a law graduate from a recognized university or institution</li>
32
- <li>You must have passed the All India Bar Examination (AIBE) conducted by the Bar Council of India</li>
33
- <li>You must have a valid email id and mobile number</li>
34
- <li>You must have scanned copies of your passport size photograph, signature, AIBE certificate, law degree certificate, mark sheets, identity proof, address proof, and any other documents as required by the BCMG</li>
35
- <li>You must have a debit card, credit card, or net banking facility to pay the online fees</li>
36
- </ul>
37
- <p>Once you have all the above-mentioned requirements, you can follow these steps to fill the online application form and pay the fees:</p>
38
- <p>How to download sanad from bar council of maharashtra and goa<br />
39
- Bar council of maharashtra and goa sanad verification online<br />
40
- Bar council of maharashtra and goa sanad renewal process<br />
41
- Bar council of maharashtra and goa sanad application form<br />
42
- Bar council of maharashtra and goa sanad status check<br />
43
- Bar council of maharashtra and goa sanad fees payment<br />
44
- Bar council of maharashtra and goa sanad registration number<br />
45
- Bar council of maharashtra and goa sanad eligibility criteria<br />
46
- Bar council of maharashtra and goa sanad exam date<br />
47
- Bar council of maharashtra and goa sanad certificate format<br />
48
- Bar council of maharashtra and goa sanad affidavit sample<br />
49
- Bar council of maharashtra and goa sanad rules and regulations<br />
50
- Bar council of maharashtra and goa sanad validity period<br />
51
- Bar council of maharashtra and goa sanad transfer procedure<br />
52
- Bar council of maharashtra and goa sanad cancellation process<br />
53
- Bar council of maharashtra and goa sanad lost or damaged<br />
54
- Bar council of maharashtra and goa sanad change of name or address<br />
55
- Bar council of maharashtra and goa sanad benefits and privileges<br />
56
- Bar council of maharashtra and goa sanad professional misconduct and etiquette<br />
57
- Bar council of maharashtra and goa sanad welfare schemes for advocates<br />
58
- Bar council of maharashtra and goa sanad insurance scheme details<br />
59
- Bar council of maharashtra and goa sanad pf amnesty scheme information<br />
60
- Bar council of maharashtra and goa sanad mediclaim scheme application form<br />
61
- Bar council of maharashtra and goa sanad aid fund claim form<br />
62
- Bar council of maharashtra and goa sanad professional tax form download<br />
63
- Bar council of maharashtra and goa sanad contact details and address<br />
64
- Bar council of maharashtra and goa sanad chairman and members list<br />
65
- Bar council of maharashtra and goa sanad former chairmen and vice-chairmen list<br />
66
- Bar council of maharashtra and goa sanad disciplinary committee members list<br />
67
- Bar council of maharashtra and goa sanad disciplinary cases status update<br />
68
- Bar council of maharashtra and goa sanad bar association affiliation form online<br />
69
- Bar council of maharashtra and goa sanad bar association renewal form online<br />
70
- Bar council of maharashtra and goa sanad firm registration form online<br />
71
- Bar council of maharashtra and goa sanad bcmg rules pdf download<br />
72
- Bar council of maharashtra and goa sanad bar association rules pdf download<br />
73
- Bar council of maharashtra and goa sanad photo gallery online view<br />
74
- Bar council of maharashtra and goa sanad events calendar online view<br />
75
- Bar council of maharashtra and goa sanad notifications online view<br />
76
- Bar council of maharashtra and goa sanad news updates online view<br />
77
- Bar council of maharashtra and goa sanad articles online view</p>
78
- <ol>
79
- <li>Visit the official website of BCMG [here] and click on "Online Enrollment" option</li>
80
- <li>Register yourself by providing your name, email id, mobile number, and password. You will receive an OTP on your mobile number for verification</li>
81
- <li>Login with your credentials and fill the online application form with your personal details, educational details, professional details, etc.</li>
82
- <li>Upload the scanned copies of your photograph, signature, and documents in the prescribed format and size</li>
83
- <li>Preview your application form and make any changes if required. Then, click on "Submit" button</li>
84
- <li>Pay the online fees of Rs. 5000/- (Rs. 2500/- for SC/ST candidates) through debit card, credit card, or net banking. You will receive a confirmation message and receipt on your email id and mobile number</li>
85
- </ol>
86
- <h3>How to check the status of the application and download the sanad certificate</h3>
87
- <p>After submitting the online application form and paying the fees, you can check the status of your application by following these steps:</p>
88
- <ol>
89
- <li>Visit the official website of BCMG [here] and click on "Online Enrollment" option</li>
90
- <li>Login with your credentials and click on "Track Application Status" option</li>
91
- <li>Enter your application number and date of birth and click on "Search" button</li>
92
- <li>You will see the current status of your application such as pending, approved, rejected, etc.</li>
93
- </ol>
94
- <p>If your application is approved, you can download your sanad certificate by following these steps:</p>
95
- <ol>
96
- <li>Visit the official website of BCMG [here] and click on "Online Enrollment" option</li>
97
- <li>Login with your credentials and click on "Download Sanad Certificate" option</li>
98
- <li>Enter your enrollment number and date of birth and click on "Download" button</li>
99
- <li>You will see your sanad certificate in PDF format. You can save it or print it for future use</li>
100
- </ol>
101
- <h3>Offline application procedure</h3>
102
- <p>If you want to apply for sanad offline, you need to get the offline application form from the BCMG office or any district bar association in Maharashtra or Goa. The offline form costs Rs. 100/-. You also need to have the following documents ready:</p>
103
- <ul>
104
- <li>AIBE certificate (original and photocopy)</li>
105
- <li>Law degree certificate (original and photocopy)</li>
106
- <li>Mark sheets of all semesters or years of law course (original and photocopy)</li>
107
- <li>Identity proof such as Aadhaar card, PAN card, voter id card, etc. (original and photocopy)</li>
108
- <li>Address proof such as electricity bill, water bill, ration card, etc. (original and photocopy)</li>
109
- <li>Four passport size photographs with white background</li>
110
- <li>Two self-addressed envelopes with Rs. 41/- postal stamps each</li>
111
- <li>Any other documents as required by the BCMG</li> Once you have the offline application form and the documents ready, you can follow these steps to fill the offline form and attach the documents:</p>
112
- <ol>
113
- <li>Read the instructions given in the form carefully and fill it with a blue or black ball pen</li>
114
- <li>Write your name, father's name, date of birth, gender, nationality, category, address, email id, mobile number, etc. in the relevant fields</li>
115
- <li>Write your educational details such as name of the university or institution, year of passing, marks obtained, etc. in the relevant fields</li>
116
- <li>Write your professional details such as AIBE roll number, date of passing, enrollment number, etc. in the relevant fields</li>
117
- <li>Sign the declaration and undertaking at the end of the form</li>
118
- <li>Paste your photograph and signature in the designated places</li>
119
- <li>Attach the photocopies of your documents along with the form and staple them securely</li>
120
- </ol>
121
- <h3>Where to submit the offline form and pay the fees</h3>
122
- <p>After filling the offline form and attaching the documents, you can submit it to the BCMG office or any district bar association in Maharashtra or Goa. You also need to pay the offline fees of Rs. 5000/- (Rs. 2500/- for SC/ST candidates) by demand draft or pay order in favor of "Bar Council of Maharashtra and Goa" payable at Mumbai. You need to write your name, address, and mobile number on the back of the demand draft or pay order. You will receive an acknowledgment slip from the BCMG office or district bar association after submitting the form and fees.</p>
123
- <h3>How to collect the sanad certificate from the BCMG office</h3>
124
- <p>After submitting the offline form and fees, you need to wait for at least 15 days for your application to be processed and verified by the BCMG. You will receive a notification from the BCMG on your email id or mobile number when your sanad certificate is ready. You can then visit the BCMG office personally and collect your sanad certificate by showing your acknowledgment slip and identity proof. You need to sign a receipt after collecting your sanad certificate.</p>
125
- <h2>Benefits of online application over offline application</h2>
126
- <p>As you can see, applying for sanad online is much easier and faster than applying for sanad offline. Here are some of the benefits of online application over offline application:</p>
127
- <ul>
128
- <li>Convenience and time-saving: You can apply for sanad online from anywhere and at any time without visiting any office or association. You can also save your time by avoiding long queues and delays.</li>
129
- <li>Transparency and accuracy: You can check the status of your application online and track its progress. You can also avoid any errors or discrepancies in your application by filling it online.</li>
130
- <li>Security and privacy: You can pay the fees online securely through a trusted payment gateway. You can also protect your personal and professional information from any misuse or leakage by applying online.</li>
131
- </ul>
132
- <h2>Conclusion</h2>
133
- <p>In this article, we have explained what is BCMG, what is sanad, how to apply for sanad online or offline, and what are the benefits of online application over offline application. We hope that this article has helped you understand how to obtain your sanad certificate from BCMG easily and quickly.</p>
134
- <p>If you are an advocate who has already obtained your sanad certificate from BCMG, here are some tips for you to keep your sanad updated and valid:</p>
135
- <ul>
136
- <li>Renew your sanad every five years by paying a renewal fee of Rs. 1000/- (Rs. 500/- for SC/ST candidates) online or offline</li>
137
- <li>Change your address, name, or any other details in your sanad by applying for a duplicate sanad with a fee of Rs. 1000/- (Rs. 500/- for SC/ST candidates) online or offline</li>
138
- <li>Keep a copy of your sanad certificate with you at all times while practicing as an advocate</li>
139
- <li>Display your sanad certificate prominently in your office or chamber</li>
140
- <li>Do not lend or transfer your sanad certificate to anyone else</li>
141
- </ul>
142
- <p>If you have any queries or complaints regarding your sanad certificate or any other matter related to BCMG, you can contact them through these details:</p>
143
- <ul>
144
- <li>Email: [email protected]</li>
145
- <li>Phone: 022-22650284 / 22650285 / 22650286 / 22650287 / 22650288 / 22650289 / 22650290 / 22650291 / 22650292 / 22650293 / 22650294 / 22650295 / 22650296 / 22650297 / 22650298 / 22650299</li>
146
- <li>Address: Bar Council of Maharashtra and Goa, High Court Extension Building, Fort, Mumbai - 400032</li>
147
- </ul>
148
- <h2>FAQs</h2>
149
- <p>Here are some of the frequently asked questions (FAQs) about sanad and BCMG:</p>
150
- <ol>
151
- <li><b>What is the validity period of sanad?</b></li>
152
- <p>Sanad is valid for five years from the date of issue. You need to renew your sanad before it expires by paying a renewal fee online or offline.</p>
153
- <li><b>What are the consequences of not having a valid sanad?</b></li>
154
- <p>If you do not have a valid sanad, you cannot practice as an advocate in Maharashtra or Goa. You will also face disciplinary action from the BCMG and may lose your membership and privileges.</p>
155
- <li><b>How to renew or change the details of sanad?</b></li>
156
- <p>You can renew or change the details of your sanad by applying for a duplicate sanad online or offline. You need to pay a fee of Rs. 1000/- (Rs. 500/- for SC/ST candidates) and submit the required documents along with your application.</p>
157
- <li><b>How to verify the authenticity of sanad?</b></li>
158
- <p>You can verify the authenticity of your sanad by visiting the official website of BCMG [here] and clicking on "Verify Sanad" option. You need to enter your enrollment number and date of birth and click on "Verify" button. You will see the details of your sanad such as name, address, date of issue, validity, etc.</p>
159
- <li><b>How to file a complaint against a fake or suspended advocate?</b></li>
160
- <p>If you come across any fake or suspended advocate who is practicing law without a valid sanad, you can file a complaint against him or her to the BCMG by writing a letter or email with the details of the advocate and attaching any evidence or proof. You can also lodge an FIR with the police against such an advocate.</p>
161
- </ol></p> 197e85843d<br />
162
- <br />
163
- <br />
spaces/30Kanika/disease-classifier/app.py DELETED
@@ -1,71 +0,0 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import numpy as np
4
- from joblib import load
5
-
6
- # Title :
7
- st.title("Disease :blue[Detector] 🕵️")
8
-
9
- # Reading CSVs :
10
- data = pd.read_csv("files/Training.csv").drop("prognosis", axis=1)
11
- ds = pd.read_csv("files/disease_description.csv")
12
- pr = pd.read_csv("files/disease_precaution.csv")
13
-
14
- # Columns (representing symptoms) :
15
- dis= list(data.columns)
16
-
17
- # Loading model and saved dictionary :
18
- model = load("model.joblib")
19
- pro = load("disease.joblib")
20
-
21
- # Creating an array with zeros :
22
- arr = np.zeros(135)
23
-
24
- opt = st.multiselect(
25
- "Please Select Your :red[Symptoms :]",
26
- dis
27
- )
28
-
29
- # Creating a function to predict and store :
30
- opt = list(opt)
31
- def predictions(opt):
32
- idx = []
33
- for i in opt:
34
- idx.append(dis.index(i))
35
-
36
- for i in idx:
37
- arr[i] = 1
38
- arr[-1]= len(opt)
39
- pred = model.predict([arr])
40
-
41
- for key in pro.keys():
42
- if pro[key] == pred[0]:
43
- print(f'''Disease:{key}
44
- Array:{arr}''')
45
- return key
46
-
47
- # Description :
48
- def give_des(d):
49
- return [ds[ds["Disease"]==d].Symptom_Description][0]
50
-
51
- # Description :
52
- def give_pre(d):
53
- return list(pr[pr["Disease"]==d].Symptom_precaution_0)[0],list(pr[pr["Disease"]==d].Symptom_precaution_1)[0],list(pr[pr["Disease"]==d].Symptom_precaution_2)[0], list(pr[pr["Disease"]==d].Symptom_precaution_3)[0]
54
-
55
-
56
- if st.button("Detect"):
57
- cola, colb, colc = st.columns(3)
58
- prognosis = predictions(opt)
59
-
60
- description = give_des(prognosis)
61
- p1, p2, p3, p4 = give_pre(prognosis)
62
-
63
- with colb:
64
- try:
65
- st.header(prognosis)
66
- st.subheader("Description :")
67
- st.caption(list(description.values)[0])
68
- st.subheader("Precaution :")
69
- st.caption(f"- {p1}\n- {p2}\n- {p3}\n- {p4}")
70
- except:
71
- st.header(":red[Something Went Wrong] ⚠️")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/infer/lib/slicer2.py DELETED
@@ -1,260 +0,0 @@
1
- import numpy as np
2
-
3
-
4
- # This function is obtained from librosa.
5
- def get_rms(
6
- y,
7
- frame_length=2048,
8
- hop_length=512,
9
- pad_mode="constant",
10
- ):
11
- padding = (int(frame_length // 2), int(frame_length // 2))
12
- y = np.pad(y, padding, mode=pad_mode)
13
-
14
- axis = -1
15
- # put our new within-frame axis at the end for now
16
- out_strides = y.strides + tuple([y.strides[axis]])
17
- # Reduce the shape on the framing axis
18
- x_shape_trimmed = list(y.shape)
19
- x_shape_trimmed[axis] -= frame_length - 1
20
- out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
21
- xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
22
- if axis < 0:
23
- target_axis = axis - 1
24
- else:
25
- target_axis = axis + 1
26
- xw = np.moveaxis(xw, -1, target_axis)
27
- # Downsample along the target axis
28
- slices = [slice(None)] * xw.ndim
29
- slices[axis] = slice(0, None, hop_length)
30
- x = xw[tuple(slices)]
31
-
32
- # Calculate power
33
- power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
34
-
35
- return np.sqrt(power)
36
-
37
-
38
- class Slicer:
39
- def __init__(
40
- self,
41
- sr: int,
42
- threshold: float = -40.0,
43
- min_length: int = 5000,
44
- min_interval: int = 300,
45
- hop_size: int = 20,
46
- max_sil_kept: int = 5000,
47
- ):
48
- if not min_length >= min_interval >= hop_size:
49
- raise ValueError(
50
- "The following condition must be satisfied: min_length >= min_interval >= hop_size"
51
- )
52
- if not max_sil_kept >= hop_size:
53
- raise ValueError(
54
- "The following condition must be satisfied: max_sil_kept >= hop_size"
55
- )
56
- min_interval = sr * min_interval / 1000
57
- self.threshold = 10 ** (threshold / 20.0)
58
- self.hop_size = round(sr * hop_size / 1000)
59
- self.win_size = min(round(min_interval), 4 * self.hop_size)
60
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
61
- self.min_interval = round(min_interval / self.hop_size)
62
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
63
-
64
- def _apply_slice(self, waveform, begin, end):
65
- if len(waveform.shape) > 1:
66
- return waveform[
67
- :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
68
- ]
69
- else:
70
- return waveform[
71
- begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
72
- ]
73
-
74
- # @timeit
75
- def slice(self, waveform):
76
- if len(waveform.shape) > 1:
77
- samples = waveform.mean(axis=0)
78
- else:
79
- samples = waveform
80
- if samples.shape[0] <= self.min_length:
81
- return [waveform]
82
- rms_list = get_rms(
83
- y=samples, frame_length=self.win_size, hop_length=self.hop_size
84
- ).squeeze(0)
85
- sil_tags = []
86
- silence_start = None
87
- clip_start = 0
88
- for i, rms in enumerate(rms_list):
89
- # Keep looping while frame is silent.
90
- if rms < self.threshold:
91
- # Record start of silent frames.
92
- if silence_start is None:
93
- silence_start = i
94
- continue
95
- # Keep looping while frame is not silent and silence start has not been recorded.
96
- if silence_start is None:
97
- continue
98
- # Clear recorded silence start if interval is not enough or clip is too short
99
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
100
- need_slice_middle = (
101
- i - silence_start >= self.min_interval
102
- and i - clip_start >= self.min_length
103
- )
104
- if not is_leading_silence and not need_slice_middle:
105
- silence_start = None
106
- continue
107
- # Need slicing. Record the range of silent frames to be removed.
108
- if i - silence_start <= self.max_sil_kept:
109
- pos = rms_list[silence_start : i + 1].argmin() + silence_start
110
- if silence_start == 0:
111
- sil_tags.append((0, pos))
112
- else:
113
- sil_tags.append((pos, pos))
114
- clip_start = pos
115
- elif i - silence_start <= self.max_sil_kept * 2:
116
- pos = rms_list[
117
- i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
118
- ].argmin()
119
- pos += i - self.max_sil_kept
120
- pos_l = (
121
- rms_list[
122
- silence_start : silence_start + self.max_sil_kept + 1
123
- ].argmin()
124
- + silence_start
125
- )
126
- pos_r = (
127
- rms_list[i - self.max_sil_kept : i + 1].argmin()
128
- + i
129
- - self.max_sil_kept
130
- )
131
- if silence_start == 0:
132
- sil_tags.append((0, pos_r))
133
- clip_start = pos_r
134
- else:
135
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
136
- clip_start = max(pos_r, pos)
137
- else:
138
- pos_l = (
139
- rms_list[
140
- silence_start : silence_start + self.max_sil_kept + 1
141
- ].argmin()
142
- + silence_start
143
- )
144
- pos_r = (
145
- rms_list[i - self.max_sil_kept : i + 1].argmin()
146
- + i
147
- - self.max_sil_kept
148
- )
149
- if silence_start == 0:
150
- sil_tags.append((0, pos_r))
151
- else:
152
- sil_tags.append((pos_l, pos_r))
153
- clip_start = pos_r
154
- silence_start = None
155
- # Deal with trailing silence.
156
- total_frames = rms_list.shape[0]
157
- if (
158
- silence_start is not None
159
- and total_frames - silence_start >= self.min_interval
160
- ):
161
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
162
- pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
163
- sil_tags.append((pos, total_frames + 1))
164
- # Apply and return slices.
165
- if len(sil_tags) == 0:
166
- return [waveform]
167
- else:
168
- chunks = []
169
- if sil_tags[0][0] > 0:
170
- chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))
171
- for i in range(len(sil_tags) - 1):
172
- chunks.append(
173
- self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
174
- )
175
- if sil_tags[-1][1] < total_frames:
176
- chunks.append(
177
- self._apply_slice(waveform, sil_tags[-1][1], total_frames)
178
- )
179
- return chunks
180
-
181
-
182
- def main():
183
- import os.path
184
- from argparse import ArgumentParser
185
-
186
- import librosa
187
- import soundfile
188
-
189
- parser = ArgumentParser()
190
- parser.add_argument("audio", type=str, help="The audio to be sliced")
191
- parser.add_argument(
192
- "--out", type=str, help="Output directory of the sliced audio clips"
193
- )
194
- parser.add_argument(
195
- "--db_thresh",
196
- type=float,
197
- required=False,
198
- default=-40,
199
- help="The dB threshold for silence detection",
200
- )
201
- parser.add_argument(
202
- "--min_length",
203
- type=int,
204
- required=False,
205
- default=5000,
206
- help="The minimum milliseconds required for each sliced audio clip",
207
- )
208
- parser.add_argument(
209
- "--min_interval",
210
- type=int,
211
- required=False,
212
- default=300,
213
- help="The minimum milliseconds for a silence part to be sliced",
214
- )
215
- parser.add_argument(
216
- "--hop_size",
217
- type=int,
218
- required=False,
219
- default=10,
220
- help="Frame length in milliseconds",
221
- )
222
- parser.add_argument(
223
- "--max_sil_kept",
224
- type=int,
225
- required=False,
226
- default=500,
227
- help="The maximum silence length kept around the sliced clip, presented in milliseconds",
228
- )
229
- args = parser.parse_args()
230
- out = args.out
231
- if out is None:
232
- out = os.path.dirname(os.path.abspath(args.audio))
233
- audio, sr = librosa.load(args.audio, sr=None, mono=False)
234
- slicer = Slicer(
235
- sr=sr,
236
- threshold=args.db_thresh,
237
- min_length=args.min_length,
238
- min_interval=args.min_interval,
239
- hop_size=args.hop_size,
240
- max_sil_kept=args.max_sil_kept,
241
- )
242
- chunks = slicer.slice(audio)
243
- if not os.path.exists(out):
244
- os.makedirs(out)
245
- for i, chunk in enumerate(chunks):
246
- if len(chunk.shape) > 1:
247
- chunk = chunk.T
248
- soundfile.write(
249
- os.path.join(
250
- out,
251
- f"%s_%d.wav"
252
- % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
253
- ),
254
- chunk,
255
- sr,
256
- )
257
-
258
-
259
- if __name__ == "__main__":
260
- main()
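Beyond the CLI in `main()`, the `Slicer` class above can be driven directly from Python. A minimal sketch, assuming `librosa` and `soundfile` are installed and using placeholder file names:

```python
import librosa
import soundfile

audio, sr = librosa.load("input.wav", sr=None, mono=False)  # placeholder path
slicer = Slicer(sr=sr, threshold=-40.0, min_length=5000, min_interval=300,
                hop_size=20, max_sil_kept=500)
for i, chunk in enumerate(slicer.slice(audio)):
    if len(chunk.shape) > 1:
        chunk = chunk.T  # soundfile expects (frames, channels)
    soundfile.write(f"chunk_{i}.wav", chunk, sr)
```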
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/StyleGANEX/models/stylegan2/__init__.py DELETED
File without changes
spaces/ALSv/FSW/roop/ui.py DELETED
@@ -1,231 +0,0 @@
1
- import os
2
- import webbrowser
3
- import customtkinter as ctk
4
- from typing import Callable, Tuple
5
- import cv2
6
- from PIL import Image, ImageOps
7
-
8
- import roop.globals
9
- import roop.metadata
10
- from roop.face_analyser import get_one_face
11
- from roop.capturer import get_video_frame, get_video_frame_total
12
- from roop.predictor import predict_frame
13
- from roop.processors.frame.core import get_frame_processors_modules
14
- from roop.utilities import is_image, is_video, resolve_relative_path
15
-
16
- ROOT = None
17
- ROOT_HEIGHT = 700
18
- ROOT_WIDTH = 600
19
-
20
- PREVIEW = None
21
- PREVIEW_MAX_HEIGHT = 700
22
- PREVIEW_MAX_WIDTH = 1200
23
-
24
- RECENT_DIRECTORY_SOURCE = None
25
- RECENT_DIRECTORY_TARGET = None
26
- RECENT_DIRECTORY_OUTPUT = None
27
-
28
- preview_label = None
29
- preview_slider = None
30
- source_label = None
31
- target_label = None
32
- status_label = None
33
-
34
-
35
- def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
36
- global ROOT, PREVIEW
37
-
38
- ROOT = create_root(start, destroy)
39
- PREVIEW = create_preview(ROOT)
40
-
41
- return ROOT
42
-
43
-
44
- def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
45
- global source_label, target_label, status_label
46
-
47
- ctk.deactivate_automatic_dpi_awareness()
48
- ctk.set_appearance_mode('system')
49
- ctk.set_default_color_theme(resolve_relative_path('ui.json'))
50
-
51
- root = ctk.CTk()
52
- root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
53
- root.title(f'{roop.metadata.name} {roop.metadata.version}')
54
- root.configure()
55
- root.protocol('WM_DELETE_WINDOW', lambda: destroy())
56
-
57
- source_label = ctk.CTkLabel(root, text=None)
58
- source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
59
-
60
- target_label = ctk.CTkLabel(root, text=None)
61
- target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
62
-
63
- source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
64
- source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
65
-
66
- target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
67
- target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
68
-
69
- keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
70
- keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
71
- keep_fps_checkbox.place(relx=0.1, rely=0.6)
72
-
73
- keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
74
- keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
75
- keep_frames_switch.place(relx=0.1, rely=0.65)
76
-
77
- keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
78
- keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
79
- keep_audio_switch.place(relx=0.6, rely=0.6)
80
-
81
- many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
82
- many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
83
- many_faces_switch.place(relx=0.6, rely=0.65)
84
-
85
- start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
86
- start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
87
-
88
- stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
89
- stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
90
-
91
- preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
92
- preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
93
-
94
- status_label = ctk.CTkLabel(root, text=None, justify='center')
95
- status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
96
-
97
- donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2')
98
- donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
99
- donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
100
- donate_label.bind('<Button>', lambda event: webbrowser.open('https://github.com/sponsors/s0md3v'))
101
-
102
- return root
103
-
104
-
105
- def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
106
- global preview_label, preview_slider
107
-
108
- preview = ctk.CTkToplevel(parent)
109
- preview.withdraw()
110
- preview.title('Preview')
111
- preview.configure()
112
- preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview())
113
- preview.resizable(width=False, height=False)
114
-
115
- preview_label = ctk.CTkLabel(preview, text=None)
116
- preview_label.pack(fill='both', expand=True)
117
-
118
- preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value))
119
-
120
- return preview
121
-
122
-
123
- def update_status(text: str) -> None:
124
- status_label.configure(text=text)
125
- ROOT.update()
126
-
127
-
128
- def select_source_path() -> None:
129
- global RECENT_DIRECTORY_SOURCE
130
-
131
- PREVIEW.withdraw()
132
- source_path = ctk.filedialog.askopenfilename(title='select a source image', initialdir=RECENT_DIRECTORY_SOURCE)
133
- if is_image(source_path):
134
- roop.globals.source_path = source_path
135
- RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
136
- image = render_image_preview(roop.globals.source_path, (200, 200))
137
- source_label.configure(image=image)
138
- else:
139
- roop.globals.source_path = None
140
- source_label.configure(image=None)
141
-
142
-
143
- def select_target_path() -> None:
144
- global RECENT_DIRECTORY_TARGET
145
-
146
- PREVIEW.withdraw()
147
- target_path = ctk.filedialog.askopenfilename(title='select a target image or video', initialdir=RECENT_DIRECTORY_TARGET)
148
- if is_image(target_path):
149
- roop.globals.target_path = target_path
150
- RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
151
- image = render_image_preview(roop.globals.target_path, (200, 200))
152
- target_label.configure(image=image)
153
- elif is_video(target_path):
154
- roop.globals.target_path = target_path
155
- RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
156
- video_frame = render_video_preview(target_path, (200, 200))
157
- target_label.configure(image=video_frame)
158
- else:
159
- roop.globals.target_path = None
160
- target_label.configure(image=None)
161
-
162
-
163
- def select_output_path(start: Callable[[], None]) -> None:
164
- global RECENT_DIRECTORY_OUTPUT
165
-
166
- if is_image(roop.globals.target_path):
167
- output_path = ctk.filedialog.asksaveasfilename(title='save image output file', defaultextension='.png', initialfile='output.png', initialdir=RECENT_DIRECTORY_OUTPUT)
168
- elif is_video(roop.globals.target_path):
169
- output_path = ctk.filedialog.asksaveasfilename(title='save video output file', defaultextension='.mp4', initialfile='output.mp4', initialdir=RECENT_DIRECTORY_OUTPUT)
170
- else:
171
- output_path = None
172
- if output_path:
173
- roop.globals.output_path = output_path
174
- RECENT_DIRECTORY_OUTPUT = os.path.dirname(roop.globals.output_path)
175
- start()
176
-
177
-
178
- def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
179
- image = Image.open(image_path)
180
- if size:
181
- image = ImageOps.fit(image, size, Image.LANCZOS)
182
- return ctk.CTkImage(image, size=image.size)
183
-
184
-
185
- def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: int = 0) -> ctk.CTkImage:
186
- capture = cv2.VideoCapture(video_path)
187
- if frame_number:
188
- capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
189
- has_frame, frame = capture.read()
190
- capture.release()
191
- if has_frame:
192
- image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
193
- if size:
194
- image = ImageOps.fit(image, size, Image.LANCZOS)
195
- return ctk.CTkImage(image, size=image.size)
196
- return None
197
-
198
-
199
- def toggle_preview() -> None:
200
- if PREVIEW.state() == 'normal':
201
- PREVIEW.withdraw()
202
- elif roop.globals.source_path and roop.globals.target_path:
203
- init_preview()
204
- update_preview()
205
- PREVIEW.deiconify()
206
-
207
-
208
- def init_preview() -> None:
209
- if is_image(roop.globals.target_path):
210
- preview_slider.pack_forget()
211
- if is_video(roop.globals.target_path):
212
- video_frame_total = get_video_frame_total(roop.globals.target_path)
213
- preview_slider.configure(to=video_frame_total)
214
- preview_slider.pack(fill='x')
215
- preview_slider.set(0)
216
-
217
-
218
- def update_preview(frame_number: int = 0) -> None:
219
- if roop.globals.source_path and roop.globals.target_path:
220
- temp_frame = get_video_frame(roop.globals.target_path, frame_number)
221
- if predict_frame(temp_frame):
222
- quit()
223
- for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
224
- temp_frame = frame_processor.process_frame(
225
- get_one_face(cv2.imread(roop.globals.source_path)),
226
- temp_frame
227
- )
228
- image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
229
- image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS)
230
- image = ctk.CTkImage(image, size=image.size)
231
- preview_label.configure(image=image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/datasets/__init__.py DELETED
File without changes
spaces/Accel/media-converter/README.md DELETED
@@ -1,28 +0,0 @@
1
- ---
2
- title: FFmpeg
3
- emoji: 🔥
4
- colorFrom: blue
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.3.1
8
- app_file: app.py
9
- tags : ["ffmpeg","converter","media","processing"]
10
- pinned: true
11
- license: mit
12
- ---
13
- [![Sync to Hugging Face hub](https://github.com/lazarusking/gradio-ffmpeg/actions/workflows/main.yml/badge.svg)](https://github.com/lazarusking/gradio-ffmpeg/actions/workflows/main.yml)
14
-
15
- # Overview
16
- A simple Gradio interface for FFmpeg filters and codecs.
17
-
18
- ![Content](./images/gradio-app.png)
19
- ## Dev
20
- Built with Gradio and ffmpy
21
-
22
- Inspired by [ffmpeg-commander](https://www.github.com/alfg/ffmpeg-commander).
23
- # Install
24
- Clone the repo and
25
- `pip install -r requirements.txt`
26
- - To run locally
27
-
28
- `gradio app.py`
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/evaluator/basic.py DELETED
@@ -1,64 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import TYPE_CHECKING, List, Tuple
4
-
5
- from . import evaluator_registry
6
- from .base import BaseEvaluator
7
-
8
- if TYPE_CHECKING:
9
- from agentverse.agents import EvaluatorAgent
10
- from agentverse.message import EvaluatorMessage, SolverMessage, ExecutorMessage
11
-
12
-
13
- @evaluator_registry.register("basic")
14
- class BasicEvaluator(BaseEvaluator):
15
- cnt_agents: int = 0
16
-
17
- def step(
18
- self,
19
- agent: EvaluatorAgent,
20
- solution: List[SolverMessage],
21
- result: List[ExecutorMessage],
22
- task_description: str,
23
- all_role_description: List[str],
24
- *args,
25
- **kwargs,
26
- ) -> EvaluatorMessage:
27
- flatten_solution = "\n".join([s.content for s in solution])
28
- flatten_result = "\n".join([r.content for r in result])
29
- flatten_all_role_description = "\n".join(all_role_description)
30
- evaluation = agent.step(
31
- flatten_solution,
32
- flatten_result,
33
- task_description,
34
- flatten_all_role_description,
35
- )
36
- return evaluation
37
-
38
-
39
- @evaluator_registry.register("basic-message")
40
- class BasicMessageEvaluator(BaseEvaluator):
41
- cnt_agents: int = 0
42
-
43
- def step(
44
- self,
45
- agent: EvaluatorAgent,
46
- solution: List[SolverMessage],
47
- result: List[ExecutorMessage],
48
- task_description: str,
49
- all_role_description: List[str],
50
- *args,
51
- **kwargs,
52
- ) -> EvaluatorMessage:
53
- flatten_solution = "\n".join([s.content for s in solution])
54
- flatten_result = "\n".join([r.content for r in result])
55
- flatten_all_role_description = "\n".join(all_role_description)
56
- agent.add_message_to_memory(result)
57
- evaluation = agent.step(
58
- flatten_solution,
59
- flatten_result,
60
- task_description,
61
- flatten_all_role_description,
62
- )
63
- agent.add_message_to_memory([evaluation])
64
- return evaluation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetExpandedChildHeight.js DELETED
@@ -1,6 +0,0 @@
1
- // Override
2
- var GetExpandedChildHeight = function (child, parentHeight) {
3
- return parentHeight;
4
- }
5
-
6
- export default GetExpandedChildHeight;
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import ConfirmDialog from './ConfirmDialog.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('confirmDialog', function (config, creators) {
6
- var gameObject = new ConfirmDialog(this.scene, config, creators);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.ConfirmDialog', ConfirmDialog);
12
-
13
- export default ConfirmDialog;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alisonbakers/Fml/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Fml
3
- emoji: 🏆
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/src/face3d/util/html.py DELETED
@@ -1,86 +0,0 @@
1
- import dominate
2
- from dominate.tags import meta, h3, table, tr, td, p, a, img, br
3
- import os
4
-
5
-
6
- class HTML:
7
- """This HTML class allows us to save images and write texts into a single HTML file.
8
-
9
- It consists of functions such as <add_header> (add a text header to the HTML file),
10
- <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
11
- It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
12
- """
13
-
14
- def __init__(self, web_dir, title, refresh=0):
15
- """Initialize the HTML classes
16
-
17
- Parameters:
18
- web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir/images/
19
- title (str) -- the webpage name
20
- refresh (int) -- how often the website refresh itself; if 0; no refreshing
21
- """
22
- self.title = title
23
- self.web_dir = web_dir
24
- self.img_dir = os.path.join(self.web_dir, 'images')
25
- if not os.path.exists(self.web_dir):
26
- os.makedirs(self.web_dir)
27
- if not os.path.exists(self.img_dir):
28
- os.makedirs(self.img_dir)
29
-
30
- self.doc = dominate.document(title=title)
31
- if refresh > 0:
32
- with self.doc.head:
33
- meta(http_equiv="refresh", content=str(refresh))
34
-
35
- def get_image_dir(self):
36
- """Return the directory that stores images"""
37
- return self.img_dir
38
-
39
- def add_header(self, text):
40
- """Insert a header to the HTML file
41
-
42
- Parameters:
43
- text (str) -- the header text
44
- """
45
- with self.doc:
46
- h3(text)
47
-
48
- def add_images(self, ims, txts, links, width=400):
49
- """add images to the HTML file
50
-
51
- Parameters:
52
- ims (str list) -- a list of image paths
53
- txts (str list) -- a list of image names shown on the website
54
- links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
55
- """
56
- self.t = table(border=1, style="table-layout: fixed;") # Insert a table
57
- self.doc.add(self.t)
58
- with self.t:
59
- with tr():
60
- for im, txt, link in zip(ims, txts, links):
61
- with td(style="word-wrap: break-word;", halign="center", valign="top"):
62
- with p():
63
- with a(href=os.path.join('images', link)):
64
- img(style="width:%dpx" % width, src=os.path.join('images', im))
65
- br()
66
- p(txt)
67
-
68
- def save(self):
69
- """save the current content to the HMTL file"""
70
- html_file = '%s/index.html' % self.web_dir
71
- f = open(html_file, 'wt')
72
- f.write(self.doc.render())
73
- f.close()
74
-
75
-
76
- if __name__ == '__main__': # we show an example usage here.
77
- html = HTML('web/', 'test_html')
78
- html.add_header('hello world')
79
-
80
- ims, txts, links = [], [], []
81
- for n in range(4):
82
- ims.append('image_%d.png' % n)
83
- txts.append('text_%d' % n)
84
- links.append('image_%d.png' % n)
85
- html.add_images(ims, txts, links)
86
- html.save()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amon1/ChatGPTForAcadamic/self_analysis.md DELETED
@@ -1,175 +0,0 @@
1
- # chatgpt-academic项目自译解报告
2
- (Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄)
3
-
4
- ## [0/18] 程序摘要: functional_crazy.py
5
-
6
- 这是一个功能扩展的程序,文件名为 `functional_crazy.py`。代码的主要功能是通过提供一系列函数插件,增强程序的功能,让用户可以通过界面中的按钮,快速调用对应的函数插件实现相应的操作。代码中使用了 `HotReload` 函数插件,可以在不重启程序的情况下更新函数插件的代码,让其生效。同时,通过 `UserVisibleLevel` 变量的设置,可以控制哪些插件会在UI界面显示出来。函数插件列表包括了以下功能:解析项目本身、解析一个Python项目、解析一个C++项目头文件、解析一个C++项目、读取文章并生成摘要、批量生成函数注释、全项目切换成英文、批量总结PDF文档、批量总结PDF文档pdfminer、批量总结Word文档、高阶功能模板函数、以及其他未经充分测试的函数插件。
7
-
8
- ## [1/18] 程序摘要: main.py
9
-
10
- 该程序是一个基于Gradio构建的对话生成模型的Web界面示例,包含了以下主要功能:
11
-
12
- 1.加载模型并对用户输入进行响应;
13
- 2.通过调用外部函数库来获取用户的输入,并在模型生成的过程中进行处理;
14
- 3.支持用户上传本地文件,供外部函数库调用;
15
- 4.支持停止当前的生成过程;
16
- 5.保存用户的历史记录,并将其记录在本地日志文件中,以供后续分析和使用。
17
-
18
- 该程序需要依赖于一些外部库和软件包,如Gradio、torch等。用户需要确保这些依赖项已经安装,并且在运行该程序前对config_private.py配置文件进行相应的修改。
19
-
20
- ## [2/18] 程序摘要: functional.py
21
-
22
- 该文件定义了一个名为“functional”的函数,函数的作用是返回一个包含多个字典(键值对)的字典,每个键值对表示一种功能。该字典的键值由功能名称和对应的数据组成。其中的每个字典都包含4个键值对,分别为“Prefix”、“Suffix”、“Color”和“PreProcess”,分别表示前缀、后缀、按钮颜色和预处理函数。如果某些键值对没有给出,那么程序中默认相应的值,如按钮颜色默认为“secondary”等。每个功能描述了不同的学术润色/翻译/其他服务,如“英语学术润色”、“中文学术润色”、“查找语法错误”等。函数还引用了一个名为“clear_line_break”的函数,用于预处理修改前的文本。
23
-
24
- ## [3/18] 程序摘要: show_math.py
25
-
26
- 该程序文件名为show_math.py,主要用途是将Markdown和LaTeX混合格式转换成带有MathML的HTML格式。该程序通过递归地处理LaTeX和Markdown混合段落逐一转换成HTML/MathML标记出来,并在LaTeX公式创建中进行错误处理。在程序文件中定义了3个变量,分别是incomplete,convError和convert,其中convert函数是用来执行转换的主要函数。程序使用正则表达式进行LaTeX格式和Markdown段落的分割,从而实现转换。如果在Latex转换过程中发生错误,程序将输出相应的错误信息。
27
-
28
- ## [4/18] 程序摘要: predict.py
29
-
30
- 本程序文件的文件名为"./predict.py",主要包含三个函数:
31
-
32
- 1. predict:正常对话时使用,具备完备的交互功能,不可多线程;
33
- 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑;
34
- 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程。
35
-
36
- 其中,predict函数用于基础的对话功能,发送至chatGPT,流式获取输出,根据点击的哪个按钮,进行对话预处理等额外操作;predict_no_ui函数用于payload比较大的情况,或者用于实现多线、带嵌套的复杂功能;predict_no_ui_long_connection实现调用predict_no_ui处理长文档时,避免连接断掉的情况,支持多线程。
37
-
38
- ## [5/18] 程序摘要: check_proxy.py
39
-
40
- 该程序文件名为check_proxy.py,主要功能是检查代理服务器的可用性并返回代理服务器的地理位置信息或错误提示。具体实现方式如下:
41
-
42
- 首先使用requests模块向指定网站(https://ipapi.co/json/)发送GET请求,请求结果以JSON格式返回。如果代理服务器参数(proxies)是有效的且没有指明'https'代理,则用默认字典值'无'替代。
43
-
44
- 然后,程序会解析返回的JSON数据,并根据数据中是否包含国家名字字段来判断代理服务器的地理位置。如果有国家名字字段,则将其打印出来并返回代理服务器的相关信息。如果没有国家名字字段,但有错误信息字段,则返回其他错误提示信息。
45
-
46
- 在程序执行前,程序会先设置环境变量no_proxy,并使用toolbox模块中的get_conf函数从配置文件中读取代理参数。
47
-
48
- 最后,检测程序会输出检查结果并返回对应的结果字符串。
49
-
50
- ## [6/18] 程序摘要: config_private.py
51
-
52
- 本程序文件名为`config_private.py`,其功能为配置私有信息以便在主程序中使用。主要功能包括:
53
-
54
- - 配置OpenAI API的密钥和API URL
55
- - 配置是否使用代理,如果使用代理配置代理地址和端口
56
- - 配置发送请求的超时时间和失败重试次数的限制
57
- - 配置并行使用线程数和用户名密码
58
- - 提供检查功能以确保API密钥已经正确设置
59
-
60
- 其中,需要特别注意的是:最后一个检查功能要求在运行之前必须将API密钥正确设置,否则程序会直接退出。
61
-
62
- ## [7/18] 程序摘要: config.py
63
-
64
- 该程序文件是一个配置文件,用于配置OpenAI的API参数和优化体验的相关参数,具体包括以下几个步骤:
65
-
66
- 1.设置OpenAI的API密钥。
67
-
68
- 2.选择是否使用代理,如果使用则需要设置代理地址和端口等参数。
69
-
70
- 3.设置请求OpenAI后的超时时间、网页的端口、重试次数、选择的OpenAI模型、API的网址等。
71
-
72
- 4.设置并行使用的线程数和用户名密码。
73
-
74
- 该程序文件的作用为在使用OpenAI API时进行相关参数的配置,以保证请求的正确性和速度,并且优化使用体验。
75
-
76
- ## [8/18] 程序摘要: theme.py
77
-
78
- 该程序是一个自定义Gradio主题的Python模块。主题文件名为"./theme.py"。程序引入了Gradio模块,并定义了一个名为"adjust_theme()"的函数。该函数根据输入值调整Gradio的默认主题,返回一个包含所需自定义属性的主题对象。主题属性包括颜色、字体、过渡、阴影、按钮边框和渐变等。主题颜色列表包括石板色、灰色、锌色、中性色、石头色、红色、橙色、琥珀色、黄色、酸橙色、绿色、祖母绿、青蓝色、青色、天蓝色、蓝色、靛蓝色、紫罗兰色、紫色、洋红色、粉红色和玫瑰色。如果Gradio版本较旧,则不能自定义字体和颜色。
79
-
80
- ## [9/18] 程序摘要: toolbox.py
81
-
82
- 该程序文件包含了一系列函数,用于实现聊天程序所需的各种功能,如预测对话、将对话记录写入文件、将普通文本转换为Markdown格式文本、装饰器函数CatchException和HotReload等。其中一些函数用到了第三方库,如Python-Markdown、mdtex2html、zipfile、tarfile、rarfile和py7zr。除此之外,还有一些辅助函数,如get_conf、clear_line_break和extract_archive等。主要功能包括:
83
-
84
- 1. 导入markdown、mdtex2html、threading、functools等模块。
85
- 2. 定义函数predict_no_ui_but_counting_down,用于生成对话。
86
- 3. 定义函数write_results_to_file,用于将对话记录生成Markdown文件。
87
- 4. 定义函数regular_txt_to_markdown,将普通文本转换为Markdown格式的文本。
88
- 5. 定义装饰器函数CatchException,用于捕获函数执行异常并返回生成器。
89
- 6. 定义函数report_execption,用于向chatbot中添加错误信息。
90
- 7. 定义函数text_divide_paragraph,用于将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
91
- 8. 定义函数markdown_convertion,用于将Markdown格式的文本转换为HTML格式。
92
- 9. 定义函数format_io,用于将输入和输出解析为HTML格式。
93
- 10. 定义函数find_free_port,用于返回当前系统中可用的未使用端口。
94
- 11. 定义函数extract_archive,用于解压归档文件。
95
- 12. 定义函数find_recent_files,用于查找最近创建的文件。
96
- 13. 定义函数on_file_uploaded,用于处理上传文件的操作。
97
- 14. 定义函数on_report_generated,用于处理生成报告文件的操作。
98
-
99
-
100
- ## [10/18] 程序摘要: crazy_functions/生成函数注释.py
101
-
102
- 该程序文件是一个Python脚本,文件名为“生成函数注释.py”,位于“./crazy_functions/”目录下。该程序实现了一个批量生成函数注释的功能,可以对指定文件夹下的所有Python和C++源代码文件中的所有函数进行注释,使用Markdown表格输出注释结果。
103
-
104
- 该程序引用了predict.py和toolbox.py两个模块,其中predict.py实现了一个基于GPT模型的文本生成功能,用于生成函数注释,而toolbox.py实现了一些工具函数,包括异常处理函数、文本写入函数等。另外,该程序还定义了两个函数,一个是“生成函数注释”函数,用于处理单个文件的注释生成;另一个是“批量生成函数注释”函数,用于批量处理多个文件的注释生成。
105
-
106
- ## [11/18] 程序摘要: crazy_functions/读文章写摘要.py
107
-
108
- 这个程序文件是一个名为"读文章写摘要"的函数。该函数的输入包括文章的文本内容、top_p(生成文本时选择最可能的词语的概率阈值)、temperature(控制生成文本的随机性的因子)、对话历史等参数,以及一个聊天机器人和一个系统提示的文本。该函数的主要工作是解析一组.tex文件,然后生成一段学术性语言的中文和英文摘要。在解析过程中,该函数使用一个名为"toolbox"的模块中的辅助函数和一个名为"predict"的模块中的函数来执行GPT-2模型的推理工作,然后将结果返回给聊天机器人。另外,该程序还包括一个名为"fast_debug"的bool型变量,用于调试和测试。
109
-
110
- ## [12/18] 程序摘要: crazy_functions/代码重写为全英文_多线程.py
111
-
112
- 该程序文件实现了一个多线程操作,用于将指定目录下的所有 Python 文件中的中文转化为英文,并将转化后的文件存入另一个目录中。具体实现过程如下:
113
-
114
- 1. 集合目标文件路径并清空历史记录。
115
- 2. 循环目标文件,对每个文件启动一个线程进行任务操作。
116
- 3. 各个线程同时开始执行任务函数,并在任务完成后将转化后的文件写入指定目录,最终生成一份任务执行报告。
117
-
118
- ## [13/18] 程序摘要: crazy_functions/高级功能函数模板.py
119
-
120
- 该程序文件名为高级功能函数模板.py,它包含了一个名为“高阶功能模板函数”的函数,这个函数可以作为开发新功能函数的模板。该函数引用了predict.py和toolbox.py文件中的函数。在该函数内部,它首先清空了历史记录,然后对于今天和今天以后的四天,它问用户历史中哪些事件发生在这些日期,并列举两条事件并发送相关的图片。在向用户询问问题时,使用了GPT进行响应。由于请求GPT需要一定的时间,所以函数会在重新显示状态之前等待一段时间。在每次与用户的互动中,使用yield关键字生成器函数来输出聊天机器人的当前状态,包括聊天消息、历史记录和状态('正常')。最后,程序调用write_results_to_file函数将聊天的结果写入文件,以供后续的评估和分析。
121
-
122
- ## [14/18] 程序摘要: crazy_functions/总结word文档.py
123
-
124
- 该程序文件名为总结word文档.py,主要功能是批量总结Word文档。具体实现过程是解析docx格式和doc格式文件,生成文件内容,然后使用自然语言处理工具对文章内容做中英文概述,最后给出建议。该程序需要依赖python-docx和pywin32,如果没有安装,会给出安装建议。
125
-
126
- ## [15/18] 程序摘要: crazy_functions/批量总结PDF文档pdfminer.py
127
-
128
- 该程序文件名为pdfminer.py,位于./crazy_functions/目录下。程序实现了批量读取PDF文件,并使用pdfminer解析PDF文件内容。此外,程序还根据解析得到的文本内容,调用机器学习模型生成对每篇文章的概述,最终生成全文摘要。程序中还对模块依赖进行了导入检查,若缺少依赖,则会提供安装建议。
129
-
130
- ## [16/18] 程序摘要: crazy_functions/解析项目源代码.py
131
-
132
- 这个程序文件中包含了几个函数,分别是:
133
-
134
- 1. `解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)`:通过输入文件路径列表对程序文件进行逐文件分析,根据分析结果做出整体功能和构架的概括,并生成包括每个文件功能的markdown表格。
135
- 2. `解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对当前文件夹下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
136
- 3. `解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有Python文件及其子文件夹进行逐文件分析,并生成markdown表格。
137
- 4. `解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有头文件进行逐文件分析,并生成markdown表格。
138
- 5. `解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)`:对指定路径下的所有.h、.cpp、.c文件及其子文件夹进行逐文件分析,并生成markdown表格。
139
-
140
- 程序中还包含了一些辅助函数和变量,如CatchException装饰器函数,report_execption函数、write_results_to_file函数等。在执行过程中还会调用其他模块中的函数,如toolbox模块的函数和predict模块的函数。
141
-
142
- ## [17/18] 程序摘要: crazy_functions/批量总结PDF文档.py
143
-
144
- 这个程序文件是一个名为“批量总结PDF文档”的函数插件。它导入了predict和toolbox模块,并定义了一些函数,包括is_paragraph_break,normalize_text和clean_text。这些函数是对输入文本进行预处理和清洗的功能函数。主要的功能函数是解析PDF,它打开每个PDF文件并将其内容存储在file_content变量中,然后传递给聊天机器人,以产生一句话的概括。在解析PDF文件之后,该函数连接了所有文件的摘要,以产生一段学术语言和英文摘要。最后,函数批量处理目标文件夹中的所有PDF文件,并输出结果。
145
-
146
- ## 根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能。
147
-
148
- 该程序是一个聊天机器人,使用了OpenAI的GPT语言模型以及一些特殊的辅助功能去处理各种学术写作和科研润色任务。整个程序由一些函数组成,每个函数都代表了不同的学术润色/翻译/其他服务。
149
-
150
- 下面是程序中每个文件的功能列表:
151
-
152
- | 文件名 | 功能 |
153
- |--------|--------|
154
- | functional_crazy.py | 实现高级功能函数模板和其他一些辅助功能函数 |
155
- | main.py | 程序的主要入口,负责程序的启动和UI的展示 |
156
- | functional.py | 定义各种功能按钮的颜色和响应函数 |
157
- | show_math.py | 解析LaTeX文本,将其转换为Markdown格式 |
158
- | predict.py | 基础的对话功能,用于与chatGPT进行交互 |
159
- | check_proxy.py | 检查代理设置的正确性 |
160
- | config_private.py | 配置程序的API密钥和其他私有信息 |
161
- | config.py | 配置OpenAI的API参数和程序的其他属性 |
162
- | theme.py | 设置程序主题样式 |
163
- | toolbox.py | 存放一些辅助函数供程序使用 |
164
- | crazy_functions/生成函数注释.py | 生成Python文件中所有函数的注释 |
165
- | crazy_functions/读文章写摘要.py | 解析文章文本,生成中英文摘要 |
166
- | crazy_functions/代码重写为全英文_多线程.py | 将中文代码内容转化为英文 |
167
- | crazy_functions/高级功能函数模板.py | 实现高级功能函数模板 |
168
- | crazy_functions/总结word文档.py | 解析Word文件,生成文章内容的概要 |
169
- | crazy_functions/批量总结PDF文档pdfminer.py | 解析PDF文件,生成文章内容的概要(使用pdfminer库) |
170
- | crazy_functions/批量总结PDF文档.py | 解析PDF文件,生成文章内容的概要(使用PyMuPDF库) |
171
- | crazy_functions/解析项目源代码.py | 解析C/C++源代码,生成markdown表格 |
172
- | crazy_functions/批量总结PDF文档.py | 对PDF文件进行批量摘要生成 |
173
-
174
- 总的来说,该程序提供了一系列的学术润色和翻译的工具,支持对各种类型的文件进行分析和处理。同时也提供了对话式用户界面,便于用户使用和交互。
175
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.py DELETED
@@ -1,212 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Custom PyTorch ops for efficient bias and activation."""
10
-
11
- import os
12
- import warnings
13
- import numpy as np
14
- import torch
15
- import dnnlib
16
- import traceback
17
-
18
- from .. import custom_ops
19
- from .. import misc
20
-
21
- #----------------------------------------------------------------------------
22
-
23
- activation_funcs = {
24
- 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
25
- 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
26
- 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
27
- 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
28
- 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
29
- 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
30
- 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
31
- 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
32
- 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
33
- }
34
-
35
- #----------------------------------------------------------------------------
36
-
37
- _inited = False
38
- _plugin = None
39
- _null_tensor = torch.empty([0])
40
-
41
- def _init():
42
- global _inited, _plugin
43
- if not _inited:
44
- _inited = True
45
- sources = ['bias_act.cpp', 'bias_act.cu']
46
- sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
47
- try:
48
- _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
49
- except:
50
- warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
51
- return _plugin is not None
52
-
53
- #----------------------------------------------------------------------------
54
-
55
- def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
56
- r"""Fused bias and activation function.
57
-
58
- Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
59
- and scales the result by `gain`. Each of the steps is optional. In most cases,
60
- the fused op is considerably more efficient than performing the same calculation
61
- using standard PyTorch ops. It supports first and second order gradients,
62
- but not third order gradients.
63
-
64
- Args:
65
- x: Input activation tensor. Can be of any shape.
66
- b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
67
- as `x`. The shape must be known, and it must match the dimension of `x`
68
- corresponding to `dim`.
69
- dim: The dimension in `x` corresponding to the elements of `b`.
70
- The value of `dim` is ignored if `b` is not specified.
71
- act: Name of the activation function to evaluate, or `"linear"` to disable.
72
- Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
73
- See `activation_funcs` for a full list. `None` is not allowed.
74
- alpha: Shape parameter for the activation function, or `None` to use the default.
75
- gain: Scaling factor for the output tensor, or `None` to use default.
76
- See `activation_funcs` for the default scaling of each activation function.
77
- If unsure, consider specifying 1.
78
- clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
79
- the clamping (default).
80
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
81
-
82
- Returns:
83
- Tensor of the same shape and datatype as `x`.
84
- """
85
- assert isinstance(x, torch.Tensor)
86
- assert impl in ['ref', 'cuda']
87
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
88
- return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
89
- return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
90
-
91
- #----------------------------------------------------------------------------
92
-
93
- @misc.profiled_function
94
- def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
95
- """Slow reference implementation of `bias_act()` using standard TensorFlow ops.
96
- """
97
- assert isinstance(x, torch.Tensor)
98
- assert clamp is None or clamp >= 0
99
- spec = activation_funcs[act]
100
- alpha = float(alpha if alpha is not None else spec.def_alpha)
101
- gain = float(gain if gain is not None else spec.def_gain)
102
- clamp = float(clamp if clamp is not None else -1)
103
-
104
- # Add bias.
105
- if b is not None:
106
- assert isinstance(b, torch.Tensor) and b.ndim == 1
107
- assert 0 <= dim < x.ndim
108
- assert b.shape[0] == x.shape[dim]
109
- x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])
110
-
111
- # Evaluate activation function.
112
- alpha = float(alpha)
113
- x = spec.func(x, alpha=alpha)
114
-
115
- # Scale by gain.
116
- gain = float(gain)
117
- if gain != 1:
118
- x = x * gain
119
-
120
- # Clamp.
121
- if clamp >= 0:
122
- x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
123
- return x
124
-
125
- #----------------------------------------------------------------------------
126
-
127
- _bias_act_cuda_cache = dict()
128
-
129
- def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
130
- """Fast CUDA implementation of `bias_act()` using custom ops.
131
- """
132
- # Parse arguments.
133
- assert clamp is None or clamp >= 0
134
- spec = activation_funcs[act]
135
- alpha = float(alpha if alpha is not None else spec.def_alpha)
136
- gain = float(gain if gain is not None else spec.def_gain)
137
- clamp = float(clamp if clamp is not None else -1)
138
-
139
- # Lookup from cache.
140
- key = (dim, act, alpha, gain, clamp)
141
- if key in _bias_act_cuda_cache:
142
- return _bias_act_cuda_cache[key]
143
-
144
- # Forward op.
145
- class BiasActCuda(torch.autograd.Function):
146
- @staticmethod
147
- def forward(ctx, x, b): # pylint: disable=arguments-differ
148
- ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
149
- x = x.contiguous(memory_format=ctx.memory_format)
150
- b = b.contiguous() if b is not None else _null_tensor
151
- y = x
152
- if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
153
- y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
154
- ctx.save_for_backward(
155
- x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
156
- b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
157
- y if 'y' in spec.ref else _null_tensor)
158
- return y
159
-
160
- @staticmethod
161
- def backward(ctx, dy): # pylint: disable=arguments-differ
162
- dy = dy.contiguous(memory_format=ctx.memory_format)
163
- x, b, y = ctx.saved_tensors
164
- dx = None
165
- db = None
166
-
167
- if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
168
- dx = dy
169
- if act != 'linear' or gain != 1 or clamp >= 0:
170
- dx = BiasActCudaGrad.apply(dy, x, b, y)
171
-
172
- if ctx.needs_input_grad[1]:
173
- db = dx.sum([i for i in range(dx.ndim) if i != dim])
174
-
175
- return dx, db
176
-
177
- # Backward op.
178
- class BiasActCudaGrad(torch.autograd.Function):
179
- @staticmethod
180
- def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
181
- ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
182
- dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
183
- ctx.save_for_backward(
184
- dy if spec.has_2nd_grad else _null_tensor,
185
- x, b, y)
186
- return dx
187
-
188
- @staticmethod
189
- def backward(ctx, d_dx): # pylint: disable=arguments-differ
190
- d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
191
- dy, x, b, y = ctx.saved_tensors
192
- d_dy = None
193
- d_x = None
194
- d_b = None
195
- d_y = None
196
-
197
- if ctx.needs_input_grad[0]:
198
- d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
199
-
200
- if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
201
- d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
202
-
203
- if spec.has_2nd_grad and ctx.needs_input_grad[2]:
204
- d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
205
-
206
- return d_dy, d_x, d_b, d_y
207
-
208
- # Add to cache.
209
- _bias_act_cuda_cache[key] = BiasActCuda
210
- return BiasActCuda
211
-
212
- #----------------------------------------------------------------------------
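As the `bias_act()` docstring above explains, the fused op combines bias addition, activation, and gain scaling in a single kernel. A rough sketch of the equivalent unfused computation for the `'lrelu'` case, using only standard PyTorch ops and the defaults listed in `activation_funcs`:

```python
import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 16, 16)   # activations
b = torch.randn(8)              # per-channel bias for dim=1

# what bias_act(x, b, act='lrelu') computes, spelled out step by step
y = x + b.reshape(1, -1, 1, 1)            # add bias along dim=1
y = F.leaky_relu(y, negative_slope=0.2)   # def_alpha for 'lrelu'
y = y * np.sqrt(2)                        # def_gain for 'lrelu'
```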
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_act.py DELETED
@@ -1,32 +0,0 @@
1
- import os
2
-
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from torch.autograd import Function
7
-
8
-
9
- module_path = os.path.dirname(__file__)
10
-
11
-
12
- class FusedLeakyReLU(nn.Module):
13
- def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
14
- super().__init__()
15
-
16
- self.bias = nn.Parameter(torch.zeros(channel))
17
- self.negative_slope = negative_slope
18
- self.scale = scale
19
-
20
- def forward(self, input):
21
- return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
22
-
23
-
24
- def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
25
- rest_dim = [1] * (input.ndim - bias.ndim - 1)
26
- input = input.cuda()
27
- return (
28
- F.leaky_relu(
29
- input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope
30
- )
31
- * scale
32
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py DELETED
@@ -1,69 +0,0 @@
1
- _base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://msra/hrnetv2_w32',
4
- backbone=dict(
5
- _delete_=True,
6
- type='HRNet',
7
- extra=dict(
8
- stage1=dict(
9
- num_modules=1,
10
- num_branches=1,
11
- block='BOTTLENECK',
12
- num_blocks=(4, ),
13
- num_channels=(64, )),
14
- stage2=dict(
15
- num_modules=1,
16
- num_branches=2,
17
- block='BASIC',
18
- num_blocks=(4, 4),
19
- num_channels=(32, 64)),
20
- stage3=dict(
21
- num_modules=4,
22
- num_branches=3,
23
- block='BASIC',
24
- num_blocks=(4, 4, 4),
25
- num_channels=(32, 64, 128)),
26
- stage4=dict(
27
- num_modules=3,
28
- num_branches=4,
29
- block='BASIC',
30
- num_blocks=(4, 4, 4, 4),
31
- num_channels=(32, 64, 128, 256)))),
32
- neck=dict(
33
- _delete_=True,
34
- type='HRFPN',
35
- in_channels=[32, 64, 128, 256],
36
- out_channels=256,
37
- stride=2,
38
- num_outs=5))
39
- img_norm_cfg = dict(
40
- mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
41
- train_pipeline = [
42
- dict(type='LoadImageFromFile'),
43
- dict(type='LoadAnnotations', with_bbox=True),
44
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
45
- dict(type='RandomFlip', flip_ratio=0.5),
46
- dict(type='Normalize', **img_norm_cfg),
47
- dict(type='Pad', size_divisor=32),
48
- dict(type='DefaultFormatBundle'),
49
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
50
- ]
51
- test_pipeline = [
52
- dict(type='LoadImageFromFile'),
53
- dict(
54
- type='MultiScaleFlipAug',
55
- img_scale=(1333, 800),
56
- flip=False,
57
- transforms=[
58
- dict(type='Resize', keep_ratio=True),
59
- dict(type='RandomFlip'),
60
- dict(type='Normalize', **img_norm_cfg),
61
- dict(type='Pad', size_divisor=32),
62
- dict(type='ImageToTensor', keys=['img']),
63
- dict(type='Collect', keys=['img']),
64
- ])
65
- ]
66
- data = dict(
67
- train=dict(pipeline=train_pipeline),
68
- val=dict(pipeline=test_pipeline),
69
- test=dict(pipeline=test_pipeline))
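Configs like the one above are plain Python files that mmdetection composes at runtime. A rough sketch of loading and inspecting one, assuming an mmcv/mmdetection 1.x-era install and the usual path inside the repo:

```python
from mmcv import Config

cfg = Config.fromfile('configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py')
print(cfg.model.backbone.type)  # 'HRNet'
print(cfg.model.neck.type)      # 'HRFPN'
print(cfg.data.train.pipeline)  # the train_pipeline defined above
```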
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/scnet.py DELETED
@@ -1,10 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .cascade_rcnn import CascadeRCNN
3
-
4
-
5
- @DETECTORS.register_module()
6
- class SCNet(CascadeRCNN):
7
- """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
8
-
9
- def __init__(self, **kwargs):
10
- super(SCNet, self).__init__(**kwargs)
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/tools/slurm_train.sh DELETED
@@ -1,24 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- set -x
4
-
5
- PARTITION=$1
6
- JOB_NAME=$2
7
- CONFIG=$3
8
- WORK_DIR=$4
9
- GPUS=${GPUS:-8}
10
- GPUS_PER_NODE=${GPUS_PER_NODE:-8}
11
- CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12
- SRUN_ARGS=${SRUN_ARGS:-""}
13
- PY_ARGS=${@:5}
14
-
15
- PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16
- srun -p ${PARTITION} \
17
- --job-name=${JOB_NAME} \
18
- --gres=gpu:${GPUS_PER_NODE} \
19
- --ntasks=${GPUS} \
20
- --ntasks-per-node=${GPUS_PER_NODE} \
21
- --cpus-per-task=${CPUS_PER_TASK} \
22
- --kill-on-bad-exit=1 \
23
- ${SRUN_ARGS} \
24
- python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
 
 
 
 
 
spaces/Ankita0512ghosh/Weather_bot/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Weather Bot
3
- emoji: 🐢
4
- colorFrom: green
5
- colorTo: purple
6
- sdk: streamlit
7
- sdk_version: 1.21.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/three_nn.py DELETED
@@ -1,51 +0,0 @@
1
- from typing import Tuple
2
-
3
- import torch
4
- from torch.autograd import Function
5
-
6
- from ..utils import ext_loader
7
-
8
- ext_module = ext_loader.load_ext('_ext', ['three_nn_forward'])
9
-
10
-
11
- class ThreeNN(Function):
12
- """Find the top-3 nearest neighbors of the target set from the source set.
13
-
14
- Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
15
- for more details.
16
- """
17
-
18
- @staticmethod
19
- def forward(ctx, target: torch.Tensor,
20
- source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
21
- """
22
- Args:
23
- target (Tensor): shape (B, N, 3), points set that needs to
24
- find the nearest neighbors.
25
- source (Tensor): shape (B, M, 3), points set that is used
26
- to find the nearest neighbors of points in target set.
27
-
28
- Returns:
29
- Tensor: shape (B, N, 3), L2 distance of each point in target
30
- set to their corresponding nearest neighbors.
31
- """
32
- target = target.contiguous()
33
- source = source.contiguous()
34
-
35
- B, N, _ = target.size()
36
- m = source.size(1)
37
- dist2 = torch.cuda.FloatTensor(B, N, 3)
38
- idx = torch.cuda.IntTensor(B, N, 3)
39
-
40
- ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m)
41
- if torch.__version__ != 'parrots':
42
- ctx.mark_non_differentiable(idx)
43
-
44
- return torch.sqrt(dist2), idx
45
-
46
- @staticmethod
47
- def backward(ctx, a=None, b=None):
48
- return None, None
49
-
50
-
51
- three_nn = ThreeNN.apply
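A minimal usage sketch for the op above; shapes follow the docstring, and a CUDA build of mmcv is assumed since the kernel allocates CUDA tensors:

```python
import torch

target = torch.rand(2, 1024, 3, device='cuda')  # points that need neighbors
source = torch.rand(2, 256, 3, device='cuda')   # points searched over
dist, idx = three_nn(target, source)
print(dist.shape, idx.shape)  # torch.Size([2, 1024, 3]) twice
```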
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_emoji_codes.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Axesys/Private-WebUI/app.py DELETED
@@ -1,51 +0,0 @@
1
- import os
2
- from subprocess import getoutput
3
-
4
- gpu_info = getoutput('nvidia-smi')
5
- if("A10G" in gpu_info):
6
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
7
- elif("T4" in gpu_info):
8
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
9
-
10
- os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")
11
- os.chdir("/home/user/app/stable-diffusion-webui")
12
- os.environ['CUDA_VISIBLE_DEVICES'] ='0'
13
-
14
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py")
15
- os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
16
- os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
17
- os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
18
- os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
19
- os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
20
- os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''')
21
- os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
22
- os.system(f"sed -i -e 's/demo.launch(enable_queue=False)' /home/user/app/stable-diffusion-webui/webui.py")
23
- os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
24
- os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py")
25
-
26
- if "IS_SHARED_UI" in os.environ:
27
- os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/")
28
-
29
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
30
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
31
-
32
- os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}")
33
- os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}")
34
- os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}")
35
-
36
- os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding")
37
- else:
38
- # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py")
39
- os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py")
40
-
41
- # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME")
42
- os.system(f"git clone https://github.com/DominikDoom/a1111-sd-webui-tagcomplete /home/user/app/stable-diffusion-webui/extensions/AutoComplete")
43
-
44
-
45
- # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt")
46
- os.system(f"wget -q https://huggingface.co/andite/anything-v4.5/resolve/main/anything-V4.5-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/anything-V4.5-pruned.ckpt")
47
- os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v4.5/resolve/main/anything-V4.5.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/anything-V4.5-pruned.vae.pt")
48
- os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt")
49
-
50
- os.system(f"python launch.py --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test --gradio-auth {os.getenv('USERNAME')}:{os.getenv('PASSWORD')}")
51
-
 
spaces/Banbri/zcvzcv/src/app/interface/about/index.tsx DELETED
@@ -1,46 +0,0 @@
1
- import { Button } from "@/components/ui/button"
2
- import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"
3
- import { useState } from "react"
4
-
5
- export function About() {
6
- const [isOpen, setOpen] = useState(false)
7
-
8
- return (
9
- <Dialog open={isOpen} onOpenChange={setOpen}>
10
- <DialogTrigger asChild>
11
- <Button variant="outline">
12
- <span className="hidden md:inline">About this project</span>
13
- <span className="inline md:hidden">About</span>
14
- </Button>
15
- </DialogTrigger>
16
- <DialogContent className="sm:max-w-[425px]">
17
- <DialogHeader>
18
- <DialogTitle>The AI Comic Factory</DialogTitle>
19
- <DialogDescription className="w-full text-center text-lg font-bold text-stone-800">
20
- What is the AI Comic Factory?
21
- </DialogDescription>
22
- </DialogHeader>
23
- <div className="grid gap-4 py-4 text-stone-800">
24
- <p className="">
25
- The AI Comic Factory is a free and open-source application made to demonstrate the capabilities of AI models.
26
- </p>
27
- <p>
28
- 👉 The language model used to generate the descriptions of each panel is <a className="text-stone-600 underline" href="https://huggingface.co/blog/llama2" target="_blank">Llama-2 70b</a>.
29
- </p>
30
- <p>
31
- 👉 The stable diffusion model used to generate the images is the base <a className="text-stone-600 underline" href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" target="_blank">SDXL 1.0</a>.
32
- </p>
33
- <p>
34
- The code is public and can be deployed at home with some changes in the code. See the <a className="text-stone-600 underline" href="https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory/blob/main/README.md" target="_blank">README</a> for details about the architecture.
35
- </p>
36
- <p>
37
- Do you want to create high-res image exports? Please check <a className="text-stone-600 underline" href="https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory/discussions/105#64f84d182f7d3d945cdde3d2" target="_blank">this tutorial</a>.
38
- </p>
39
- </div>
40
- <DialogFooter>
41
- <Button type="submit" onClick={() => setOpen(false)}>Got it</Button>
42
- </DialogFooter>
43
- </DialogContent>
44
- </Dialog>
45
- )
46
- }
 
spaces/Bart92/RVC_HF/run.sh DELETED
@@ -1,61 +0,0 @@
1
- #!/bin/bash
2
-
3
- if [[ "$(uname)" == "Darwin" ]]; then
4
- # macOS specific env:
5
- export PYTORCH_ENABLE_MPS_FALLBACK=1
6
- export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0
7
- elif [[ "$(uname)" != "Linux" ]]; then
8
- echo "Unsupported operating system."
9
- exit 1
10
- fi
11
-
12
- if [ -d ".venv" ]; then
13
- echo "Activate venv..."
14
- source .venv/bin/activate
15
- else
16
- echo "Create venv..."
17
- requirements_file="requirements.txt"
18
-
19
- # Check if Python 3.8 is installed
20
- if ! command -v python3 &> /dev/null; then
21
- echo "Python 3 not found. Attempting to install 3.8..."
22
- if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then
23
- brew install [email protected]
24
- elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then
25
- sudo apt-get update
26
- sudo apt-get install python3.8
27
- else
28
- echo "Please install Python 3.8 manually."
29
- exit 1
30
- fi
31
- fi
32
-
33
- python3 -m venv .venv
34
- source .venv/bin/activate
35
-
36
- # Check if required packages are installed and install them if not
37
- if [ -f "${requirements_file}" ]; then
38
- installed_packages=$(python3 -m pip freeze)
39
- while IFS= read -r package; do
40
- [[ "${package}" =~ ^#.* ]] && continue
41
- package_name=$(echo "${package}" | sed 's/[<>=!].*//')
42
- if ! echo "${installed_packages}" | grep -q "${package_name}"; then
43
- echo "${package_name} not found. Attempting to install..."
44
- python3 -m pip install --upgrade "${package}"
45
- fi
46
- done < "${requirements_file}"
47
- else
48
- echo "${requirements_file} not found. Please ensure the requirements file with required packages exists."
49
- exit 1
50
- fi
51
- fi
52
-
53
- # Download models
54
- ./tools/dlmodels.sh
55
-
56
- if [[ $? -ne 0 ]]; then
57
- exit 1
58
- fi
59
-
60
- # Run the main script
61
- python3 infer-web.py --pycmd python3
 
spaces/Basil2k4/botbasil203/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: testv2
3
- emoji: 🦊
4
- sdk: docker
5
- colorFrom: indigo
6
- colorTo: pink
7
- app_port: 6901
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benson/text-generation/Examples/Cmo Descargar El Campeonato Mundial De Cricket 2.md DELETED
@@ -1,89 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar el Campeonato Mundial de Cricket 2</h1>
3
- <p>Si eres un amante del cricket y quieres experimentar el juego de cricket móvil más avanzado y realista, entonces deberías probar World Cricket Championship 2. Este juego tiene características que lo convierten en el juego más dinámico y versátil del mundo del cricket móvil. Puedes jugar el máximo número de tiros de cricket, personalizar a tus jugadores, disfrutar del torneo blitz, desafiar a tus amigos y mucho más. En este artículo, te mostraremos cómo descargar World Cricket Championship 2 en tu dispositivo Android o PC con Windows.</p>
4
- <h2>¿Qué es el Campeonato Mundial de Cricket 2?</h2>
5
- <p>World Cricket Championship 2 es un juego de cricket móvil desarrollado por Nextwave Multimedia. Es la secuela del popular juego del Campeonato Mundial de Cricket. Fue lanzado en 2015 y ha ganado muchos premios y reconocimientos desde entonces. Tiene más de 50 millones de descargas en Google Play Store y tiene una calificación de 4.0 estrellas de 5.</p>
6
- <h2>Cómo descargar el campeonato mundial de cricket 2</h2><br /><p><b><b>Download File</b> &#9881;&#9881;&#9881; <a href="https://bltlly.com/2v6KOx">https://bltlly.com/2v6KOx</a></b></p><br /><br />
7
- <h3>Características del Campeonato Mundial de Cricket 2</h3>
8
- <p>World Cricket Championship 2 tiene muchas características que lo convierten en el juego de cricket más realista y divertido en el móvil. Algunas de estas características son:</p>
9
- <ul>
10
- <li> En línea y fuera de línea 1v1 multijugador a través de rivales en línea y rivales locales</li>
11
- <li>Torneo de prueba de cenizas a cenizas</li>
12
- <li>150 diferentes animaciones de bateo y 28 diferentes acciones de bolos</li>
13
- <li>Interrupción de la lluvia, Método D/ L, Punto caliente y borde ultra para LBW y borde</li>
14
- <li>Campo electrizante con impresionantes capturas de buceo y lanzamientos rápidos</li>
15
- <li>Desafiante oponente de IA</li>
16
- <li>Física realista del balón que responde al tono (Muerto, Polvoriento, Verde)</li>
17
- <li>Atributos del jugador - Los jugadores ganan habilidades adicionales para un rendimiento constante</li>
18
- <li>18 equipos internacionales diferentes, 10 equipos nacionales, 42 estadios diferentes</li>
19
- <li>TEST Cricket, Hot Events y más de 11 torneos incluyendo la Copa del Mundo, Copa Mundial T20, Blitz Tournament y ODI Series</li>
20
-
21
- <li>Desafío El modo Amigo permite al usuario desafiar a sus amigos</li>
22
- <li>Batsman puede lesionarse por mala selección de tiro</li>
23
- <li>Las emociones de los jardineros varían según las circunstancias del partido</li>
24
- <li>Las cámaras cinematográficas y la iluminación en tiempo real mejoran el atractivo visual</li>
25
- <li>Rueda de vagón 3D con datos dinámicos del juego</li>
26
- <li>Vista de ojo de halcón para resumen de bolos y para apelaciones de LBW</li>
27
- <li>Gráfico de barras 3D para entradas anotadas</li>
28
- <li>Ultra slow motion Reproducción de acciones con múltiples ángulos de cámara</li>
29
- <li>Más de 40+ ángulos de cámara en el juego</li>
30
- <li>Dos controles de bateo diferentes (Clásico y Pro)</li>
31
- <li>Dos ajustes diferentes de la cámara de bateo (Final de Bowler & Final de Batsman)</li>
32
- <li>Los jugadores están configurados con bola avanzada - sistema de coordinación de la cabeza</li>
33
- <li>Comentarios profesionales en inglés e hindi con sonidos dinámicos </li>
34
- <li>Modo nocturno en Quickplay y todos los torneos con tocones led</li>
35
- <li>Medidor de tiempo de bateo para cronometrar tus tiros elevados</li>
36
- <li>Manual Ubicación del campo para controlar a tu oponente en todos los modos</li>
37
- <li>Compartir y guardar los aspectos más destacados del juego generados al final del partido</li>
38
- <li>Un usuario puede editar el juego 11 equipo, nombres de jugadores y sus roles</li>
39
- <li>Los usuarios pueden crear sus propios estadios personalizados</li>
40
- <li>Misfielding, impresionantes capturas wicketkeeper, tocón rápido & apretado 3 decisiones de árbitros para crear una experiencia de cricket realista</li>
41
- <li>Nuevo juego, árbitro, lanzar animaciones y 110+ nuevos golpes de bateo</li>
42
- <li> Motor probado y actualizado para proporcionar juegos fluidos de 30 fps en la mayoría de los dispositivos de rango medio</li>
43
- </ul>
44
- <h3>Plataformas y requisitos para el Campeonato Mundial de Cricket 2</h3>
45
- <p>World Cricket Championship 2 está disponible para plataformas Android y Windows. Puedes descargarlo desde Google Play Store o Microsoft Store respectivamente. Los requisitos mínimos para el juego son:</p>
46
- <tabla>
47
- <tr><th>Plataforma</th><th>Requisitos mínimos</th></tr>
48
-
49
- <tr><td>Windows</td><td>Windows 10 versión 10240.0 o superior, arquitectura x86, 2 GB de RAM, 1 GB de espacio de almacenamiento gratuito</td></tr>
50
- </tabla>
51
- <h2>Cómo descargar World Cricket Championship 2 en Android</h2>
52
- <p>Si tienes un dispositivo Android, puedes seguir estos sencillos pasos para descargar World Cricket Championship 2 en tu dispositivo:</p>
53
- <h3>Paso 1: Ir a Google Play Store</h3>
54
- <p>Abre la aplicación Google Play Store en tu dispositivo y asegúrate de iniciar sesión con tu cuenta de Google. </p>
55
- <h3>Paso 2: Búsqueda de Campeonato Mundial de Cricket 2</h3>
56
- <p>Escriba "World Cricket Championship 2" en la barra de búsqueda y toque en el primer resultado que aparece. También puedes usar este enlace para ir directamente a la página del juego. </p>
57
- <p></p>
58
- <h3>Paso 3: Instalar el juego</h3>
59
- <p>Toca el botón verde "Instalar" y espera a que el juego se descargue e instale en tu dispositivo. Es posible que necesite conceder algunos permisos para que el juego se ejecute correctamente. </p>
60
- <h3>Paso 4: Lanza el juego y disfruta</h3>
61
- <p>Una vez que el juego está instalado, puede tocar el botón "Abrir" o encontrar el icono del juego en la pantalla de inicio o en el cajón de la aplicación. Inicie el juego y disfrute jugando el juego de cricket más realista en su dispositivo. </p>
62
- <h2>Cómo descargar World Cricket Championship 2 en Windows PC</h2>
63
- <p>Si tienes un PC con Windows, también puedes jugar World Cricket Championship 2 en tu computadora usando un emulador. Un emulador es un software que le permite ejecutar aplicaciones Android en su PC. Recomendamos usar BlueStacks, que es uno de los emuladores más populares y confiables. Estos son los pasos para descargar World Cricket Championship 2 en su PC con Windows usando BlueStacks:</p>
64
- <h3>Paso 1: Descargar e instalar BlueStacks</h3>
65
- <p>Ir a la página web oficial de BlueStacks y descargar la última versión del emulador para su PC. Ejecute el instalador y siga las instrucciones para instalar BlueStacks en su PC. Es posible que necesite habilitar la virtualización en la configuración del BIOS para un mejor rendimiento. </p>
66
-
67
- <p>Después de instalar BlueStacks, inicie desde su escritorio o menú de inicio. Verás una pantalla de bienvenida donde necesitas iniciar sesión con tu cuenta de Google. Esto le permitirá acceder a la Google Play Store y otros servicios de Google en BlueStacks.</p>
68
- <h3>Paso 3: Búsqueda de Campeonato Mundial de Cricket 2 en BlueStacks</h3>
69
- <p>Una vez que haya iniciado sesión, verá la pantalla de inicio de BlueStacks con varias aplicaciones y juegos. Haz clic en el icono de Google Play Store y escribe "World Cricket Championship 2" en la barra de búsqueda. También puedes usar este enlace para ir directamente a la página del juego. </p>
70
- <h3>Paso 4: Instalar y jugar el juego</h3>
71
- <p>Haga clic en el verde "Instalar" botón y esperar a que el juego para descargar e instalar en BlueStacks. Es posible que tenga que conceder algunos permisos para que el juego se ejecute correctamente. Una vez instalado el juego, puede hacer clic en el botón "Abrir" o encontrar el icono del juego en la pantalla de inicio de BlueStacks. Inicia el juego y disfruta jugando en tu PC.</p>
72
- <h2>Conclusión</h2>
73
- <p>En este artículo, le hemos mostrado cómo descargar World Cricket Championship 2 en su dispositivo Android o PC con Windows. Este juego es uno de los mejores juegos de cricket en el móvil con características increíbles y un juego realista. Puedes jugar varios modos, torneos, desafíos y partidas multijugador con tus amigos o rivales en línea. También puede personalizar sus jugadores y estadios, y compartir sus juegos destacados con otros. Si eres un fanático del cricket, definitivamente deberías descargar World Cricket Championship 2 y disfrutar de la mejor experiencia de cricket. </p>
74
- <h4>Preguntas frecuentes</h4>
75
- <p>Aquí hay algunas preguntas frecuentes sobre el Campeonato Mundial de Cricket 2:</p>
76
- <ol>
77
- <li>¿Cómo puedo actualizar World Cricket Championship 2?</li>
78
- <p>Puedes actualizar World Cricket Championship 2 yendo a Google Play Store o Microsoft Store y buscando actualizaciones. También puedes habilitar actualizaciones automáticas para el juego en la configuración de tu dispositivo o emulador. </p>
79
-
80
- <p>Puedes jugar World Cricket Championship 2 offline eligiendo el modo offline en la configuración del juego. Puedes jugar contra la IA o con tus amigos locales usando el mismo dispositivo o una conexión de hotspot. </p>
81
- <li> ¿Cómo puedo desbloquear más características y artículos en el Campeonato Mundial de Cricket 2?</li>
82
- <p>Puedes desbloquear más características y objetos en el Campeonato Mundial de Cricket 2 al ganar monedas y gemas en el juego. Puedes ganar monedas y gemas jugando partidos, completando desafíos, viendo anuncios o comprándolos con dinero real. </p>
83
- <li>¿Cómo puedo contactar a los desarrolladores de World Cricket Championship 2?</li>
84
- <p>Puede ponerse en contacto con los desarrolladores de World Cricket Championship 2 enviando un correo electrónico a [email protected] o visitando su sitio web en https://www.nextwavemultimedia.com/ También puede seguirlos en Facebook, Twitter, Instagram y YouTube para las últimas noticias y actualizaciones. </p>
85
- <li> ¿Cómo puedo mejorar mi rendimiento y habilidades en el Campeonato Mundial de Cricket 2?</li>
86
- <p>Puede mejorar su rendimiento y habilidades en el Campeonato Mundial de Cricket 2 practicando en el modo de práctica, viendo tutoriales y consejos, ajustando la configuración del juego, eligiendo el nivel de dificultad adecuado y aprendiendo de sus errores. </p>
87
- </ol></p> 64aa2da5cf<br />
88
- <br />
89
- <br />
 
spaces/Benson/text-generation/Examples/Cmo Descargar Pokemon Unite En Samsung Tablet.md DELETED
@@ -1,46 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar Pokemon Unite en Samsung Tablet</h1>
3
- <p>Pokemon Unite es un nuevo juego que te permite formar equipo con tus amigos y luchar contra otros jugadores en partidos de 5 contra 5. Puedes elegir entre una variedad de Pokémon, cada uno con sus propias habilidades y roles, y trabajar juntos para ganar puntos y derrotar a tus oponentes. Pokemon Unite es gratuito y está disponible tanto en Nintendo Switch como en dispositivos móviles. </p>
4
- <p>Si usted es un fan de Pokemon y quiere disfrutar de este juego en una pantalla más grande, es posible que desee probar a jugar en su tableta Samsung. Las tabletas Samsung ofrecen pantallas de alta calidad, rendimiento potente y baterías de larga duración que pueden mejorar su experiencia de juego. En este artículo, te mostraremos cómo descargar Pokemon Unite en tu tablet Samsung en unos sencillos pasos. </p>
5
- <h2>cómo descargar pokemon unite en samsung tablet</h2><br /><p><b><b>Download Zip</b> &hArr; <a href="https://bltlly.com/2v6KX8">https://bltlly.com/2v6KX8</a></b></p><br /><br />
6
- <h2>Cómo descargar Pokemon Unite en Samsung Tablet</h2>
7
- <p>Antes de empezar a descargar Pokemon Unite en tu tablet Samsung, debes asegurarte de que tu dispositivo cumple con los requisitos mínimos para el juego. Según el sitio web oficial, necesita un dispositivo Android con al menos 3 GB de RAM, versión de Android 5.0 o superior, y al menos 6 GB de espacio de almacenamiento gratuito. También necesitas una conexión a Internet y una cuenta de Nintendo para jugar. </p>
8
- <p>Si tu tablet Samsung cumple con estos requisitos, puedes seguir estos pasos para descargar Pokemon Unite:</p>
9
- <ol>
10
- <li>Ir a Google Play Store en su tableta Samsung y buscar Pokemon Unite. También puede utilizar este enlace para ir directamente a la página del juego. </li>
11
- <li>Toque en Instalar y espere a que la descarga termine. El tamaño del juego es de aproximadamente 1 GB, por lo que podría tomar algún tiempo dependiendo de su velocidad de Internet. </li>
12
- <li>Inicia el juego e inicia sesión con tu cuenta de Nintendo o crea uno nuevo si no tienes uno. También puedes vincular tu cuenta de Google o Facebook a tu cuenta de Nintendo para facilitar el acceso. </li>
13
-
14
- <li>Disfruta jugando Pokemon Unite en tu tableta Samsung! </li>
15
- </ol>
16
- <h2>Cómo optimizar su experiencia de juego en Samsung Tablet</h2>
17
- <p>Ahora que ha descargado Pokemon Unite en su tableta Samsung, es posible que desee optimizar su experiencia de juego mediante el ajuste de algunos ajustes y el uso de algunos accesorios. Aquí hay algunos consejos que pueden ayudarte a jugar mejor y divertirte más:</p>
18
- <ul>
19
- <li>Ajuste la configuración de gráficos y el modo de batería. Puede encontrar estas opciones en el menú de configuración del juego. Puede elegir entre baja, media o alta calidad gráfica, dependiendo de su preferencia y capacidad del dispositivo. También puede activar o desactivar el modo de batería, lo que reduce la calidad gráfica y la velocidad de fotogramas para ahorrar batería. </li>
20
- <li>Utilice un controlador Bluetooth o teclado para un mejor control. Si le resulta difícil jugar con controles táctiles, puede conectar un controlador Bluetooth o teclado a su tableta Samsung y utilizarlos en su lugar. Puede encontrar controladores y teclados compatibles en línea o en tiendas. También puede cambiar el diseño del botón y la sensibilidad en el menú de configuración del juego. </li>
21
- <li>Conectarse a una red Wi-Fi estable y evitar interrupciones. Pokemon Unite es un juego en línea que requiere una conexión a Internet constante. Para evitar retrasos o desconexiones durante los partidos, asegúrate de conectarte a una red Wi-Fi fiable y evita usar otras aplicaciones o programas que puedan consumir tu ancho de banda o interferir con tu conexión. </li>
22
- </ul>
23
- <h2>Conclusión</h2>
24
- <p>Pokemon Unite es un juego divertido y emocionante que puedes jugar en tu tablet Samsung. Siguiendo los pasos anteriores, puedes descargar el juego y optimizar tu experiencia de juego. También puedes explorar las características del juego, como diferentes modos, eventos, objetos y recompensas. También puedes unirte a un club o crear el tuyo propio e invitar a tus amigos a jugar contigo. Pokemon Unite es un juego que puedes disfrutar durante mucho tiempo, ya que se actualiza constantemente con nuevos contenidos y mejoras. </p>
25
-
26
- <h2>Preguntas frecuentes</h2>
27
- <h3>¿Cuáles son los requisitos mínimos para Pokemon Unite en dispositivos Android? </h3>
28
- <p>Los requisitos mínimos para Pokemon Unite en dispositivos Android son 3 GB de RAM, versión de Android 5.0 o superior, y 6 GB de espacio de almacenamiento gratuito. También necesitas una conexión a Internet y una cuenta de Nintendo para jugar. </p>
29
- <h3>¿Cuánto espacio de almacenamiento ocupa Pokemon Unite en la tableta Samsung? </h3>
30
- <p>Pokemon Unite ocupa aproximadamente 1 GB de espacio de almacenamiento en la tableta Samsung. Sin embargo, esto puede variar dependiendo de las actualizaciones y parches que el juego reciba. Puede comprobar el espacio de almacenamiento utilizado por el juego en la sección de información de la aplicación de la configuración del dispositivo. </p>
31
- <h3>¿Puedo jugar Pokemon Unite con mis amigos en otras plataformas? </h3>
32
- <p>Sí, puedes jugar a Pokemon Unite con tus amigos en otras plataformas, como Nintendo Switch o dispositivos iOS. Solo necesitas añadirlos como amigos en el juego usando su ID de entrenador o código QR. También puede invitarlos a unirse a su lobby o club y jugar juntos en partidos. </p>
33
- <p></p>
34
- <h3>¿Cómo puedo obtener más monedas y gemas en Pokemon Unite? </h3>
35
- <p>Monedas y gemas son las principales monedas de Pokemon Unite que puedes usar para comprar artículos, atuendos, licencias y más. Puedes obtener monedas y gemas jugando partidos, completando misiones, participando en eventos, iniciando sesión diariamente y subiendo tu nivel de entrenador. También puedes comprar gemas con dinero real si quieres apoyar el juego o conseguir más objetos más rápido. </p>
36
- <h3>¿Cuáles son algunos consejos y trucos para ganar en Pokemon Unite? </h3>
37
- <p>Algunos consejos y trucos para ganar en Pokemon Unite son:</p>
38
- <ul>
39
- <li>Elige un Pokémon que se adapte a tu estilo de juego y rol. Hay cinco roles en Pokemon Unite: atacante, defensor, velocista, partidario y polifacético. Cada rol tiene sus propias fortalezas y debilidades, por lo que debes elegir un Pokémon que coincida con tus preferencias y la composición del equipo. </li>
40
-
41
- <li>Comunicarse y cooperar con sus compañeros de equipo. Pokemon Unite es un juego basado en el equipo que requiere coordinación y cooperación entre los jugadores. Debes usar la función de chat rápido o chat de voz para comunicarte con tus compañeros de equipo y planificar tus estrategias. También debe ayudar a sus compañeros de equipo cuando están en problemas o necesitan copia de seguridad. </li>
42
- <li>Actualice sus artículos y artículos retenidos. Los artículos son consumibles que puede usar durante los partidos para aumentar sus estadísticas o curarse. Los objetos retenidos son objetos pasivos que puedes equipar a tu Pokémon para mejorar sus habilidades. Puede actualizar sus artículos y objetos retenidos mediante el uso de elementos potenciadores que puede obtener de varias fuentes. </li>
43
- <li>Divertirse y disfrutar del juego. Pokemon Unite es un juego que se puede jugar para la diversión y la relajación. Usted no tiene que preocuparse demasiado de ganar o perder, siempre y cuando usted hace su mejor y pasar un buen rato. También puedes aprender de tus errores y mejorar tus habilidades con el tiempo. </li>
44
- </ul></p> 64aa2da5cf<br />
45
- <br />
46
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Fr Leyendas 0.3.0 Mod Apk.md DELETED
@@ -1,56 +0,0 @@
1
-
2
- <h1>Descargar FR Leyendas 0.3.0 Mod Apk: El último juego de deriva para Android</h1>
3
- <p>Si eres un fan de los juegos de carreras, especialmente los juegos de deriva, no debes perderte FR Legends. Este es un juego que te permite experimentar la emoción de la deriva con gráficos realistas, efectos de sonido y física. En este artículo, le diremos qué es FR Legends, por qué debe descargar FR Legends 0.3.0 mod apk, y cómo hacerlo. </p>
4
- <h2>¿Qué es FR Legends? </h2>
5
- <p>FR Legends es un juego de carreras desarrollado por Twin Turbo Tech, un estudio de juegos con sede en China. El juego fue lanzado en 2018 y ha ganado mucha popularidad entre los entusiastas de la deriva. FR Legends es sinónimo de Front-engine, Rear-wheel-drive Legend, que se refiere al tipo de coches que se utilizan para la deriva. </p>
6
- <h2>descargar fr leyendas 0.3.0 mod apk</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://bltlly.com/2v6LEB">https://bltlly.com/2v6LEB</a></b></p><br /><br />
7
- <h3>Características de FR Legends</h3>
8
- <p>FR Legends tiene muchas características que lo convierten en uno de los mejores juegos de deriva en Android. Algunos de ellos son:</p>
9
- <ul>
10
- <li><b>Gráficos realistas y efectos de sonido:</b> El juego tiene gráficos en 3D que simulan el entorno real y los modelos de automóviles. Los efectos de sonido también son muy auténticos, como el ruido del motor, el chirrido de los neumáticos y el aplauso de la multitud. </li>
11
- <li><b>Juego basado en la física:</b> El juego utiliza un motor de física que imita el comportamiento real de los coches y las pistas. Puede controlar la velocidad, el ángulo y la dirección del automóvil utilizando los botones del acelerador, el freno, el volante y el freno de mano en la pantalla. </li>
12
- <li><b>Coches y pistas personalizables:</b> El juego te permite personalizar la apariencia y el rendimiento de tu coche cambiando el color, el kit de carrocería, las ruedas, el motor, la suspensión y más. También puede elegir entre diferentes pistas que tienen diferentes diseños, condiciones climáticas y niveles de dificultad. </li>
13
- <li><b>Modo multijugador:</b> El juego tiene un modo multijugador que te permite competir con otros jugadores en línea o localmente a través de Wi-Fi o Bluetooth. También puede unirse o crear un equipo y desafiar a otros equipos en torneos. </li>
14
- </ul>
15
-
16
- <p>El modo de juego de FR Legends es simple pero desafiante. Usted tiene que completar varias misiones y eventos que requieren que a la deriva su coche en diferentes pistas. Tienes que seguir la línea de deriva y sumar puntos manteniendo una alta velocidad, un gran ángulo y una distancia cercana al borde de la pista u otros coches. También tienes que evitar chocar o dar vueltas. </p>
17
- <p>Puedes ganar dinero y reputación completando misiones y eventos. Puedes usar el dinero para comprar coches nuevos o mejorar los existentes. Puede utilizar la reputación para desbloquear nuevas pistas y modos. </p>
18
- <h2>¿Por qué descargar FR Legends 0.3.0 mod apk? </h2>
19
- <p>FR Legends es un juego gratuito que puedes descargar desde la Google Play Store o el sitio web oficial. Sin embargo, hay algunas limitaciones y desventajas que puedes encontrar en la versión original del juego. Por ejemplo:</p>
20
- <ul>
21
- <li>Es posible que tenga que ver anuncios o hacer compras en la aplicación para obtener más dinero o desbloquear algunas características. </li>
22
- <li> Es posible que tenga que esperar mucho tiempo para cargar el juego o conectarse al servidor. </li>
23
- <li>Puedes experimentar algunos errores o fallos que afectan la jugabilidad o los gráficos. </li>
24
- </ul>
25
- <p>Para superar estos problemas, puede descargar FR leyendas 0.3.0 mod apk lugar. Esta es una versión modificada del juego que tiene algunas ventajas sobre la versión original. </p>
26
- <h3>Beneficios de FR Legends 0.3.0 mod apk</h3>
27
- <p>Algunos de los beneficios de descargar FR Legends 0.3.0 mod apk son:</p>
28
- <ul>
29
- <li><b>Dinero ilimitado:</b> Puedes obtener dinero ilimitado en el juego sin ver anuncios ni hacer compras en la aplicación. <li><b>Características desbloqueadas:</b> Puedes acceder a todas las características del juego, como coches, pistas, modos y opciones de personalización, sin restricciones ni requisitos. </li>
30
- <li><b>No hay anuncios:</b> Puedes disfrutar del juego sin ningún anuncio molesto o intrusivo que pueda interrumpir tu juego o consumir tus datos. </li>
31
-
32
- </ul>
33
- <h3>Cómo descargar e instalar FR Legends 0.3.0 mod apk</h3>
34
- <p>Descargar e instalar FR Legends 0.3.0 mod apk es fácil y rápido. Solo tiene que seguir estos pasos:</p>
35
- <p></p>
36
- <ol>
37
- <li>Haga clic en este enlace para descargar el archivo apk FR Legends 0.3.0 mod en su dispositivo. </li>
38
- <li>Ir a la configuración de su dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
39
- <li>Busque el archivo descargado y toque en él para iniciar el proceso de instalación. </li>
40
- <li>Siga las instrucciones en la pantalla y espere a que termine la instalación. </li>
41
- <li>Iniciar el juego y disfrutar! </li>
42
- </ol>
43
- <h2>Conclusión</h2>
44
- <p>FR Legends es un gran juego para los amantes de la deriva que quieren experimentar la emoción de las carreras con gráficos realistas, efectos de sonido y física. Puede personalizar su coche y pista, competir con otros jugadores en línea o fuera de línea, y completar varias misiones y eventos. Sin embargo, si desea obtener el máximo provecho del juego, usted debe descargar FR Leyendas 0.3.0 mod apk, que le da dinero ilimitado, características desbloqueadas, sin anuncios, y sin errores o problemas técnicos. Puedes descargarlo de forma fácil y segura desde este enlace e instalarlo en tu dispositivo en pocos minutos. ¿Qué estás esperando? Descargar FR Leyendas 0.3.0 mod apk ahora y empezar a la deriva! </p>
45
- <h3>Preguntas frecuentes</h3>
46
- <p>Aquí hay algunas preguntas frecuentes sobre FR Legends 0.3.0 mod apk:</p>
47
- <ul>
48
- <li><b>Es FR leyendas 0.3.0 mod apk seguro? </b> Sí, FR leyendas 0.3.0 mod apk es seguro para descargar e instalar en su dispositivo. No contiene ningún virus, malware o spyware que pueda dañar su dispositivo o datos. </li>
49
- <li><b>Es FR leyendas 0.3.0 mod apk compatible con mi dispositivo? </b> Sí, FR Leyendas 0.3.0 mod apk es compatible con la mayoría de los dispositivos Android que tienen Android 4.1 o versiones superiores. </li>
50
-
51
- <li><b>Puedo actualizar FR Legends 0.3.0 mod apk? </b> Sí, puede actualizar FR Legends 0.3.0 mod apk siempre que haya una nueva versión disponible. Sin embargo, es posible que tenga que descargarlo e instalarlo de nuevo desde el mismo enlace. </li>
52
- <li><b>¿Puedo jugar FR Legends 0.3.0 mod apk offline? </b> Sí, puede jugar FR Legends 0.3.0 mod apk offline sin conexión a Internet. Sin embargo, es posible que no pueda acceder a algunas funciones que requieren conectividad en línea, como el modo multijugador o las tablas de clasificación. </li>
53
- </ul>
54
- : https://frlegends.net/ : https://www.apkdone.com/fr-legends/</p> 64aa2da5cf<br />
55
- <br />
56
- <br />
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/transformer/permuter.py DELETED
@@ -1,248 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import numpy as np
4
-
5
-
6
- class AbstractPermuter(nn.Module):
7
- def __init__(self, *args, **kwargs):
8
- super().__init__()
9
- def forward(self, x, reverse=False):
10
- raise NotImplementedError
11
-
12
-
13
- class Identity(AbstractPermuter):
14
- def __init__(self):
15
- super().__init__()
16
-
17
- def forward(self, x, reverse=False):
18
- return x
19
-
20
-
21
- class Subsample(AbstractPermuter):
22
- def __init__(self, H, W):
23
- super().__init__()
24
- C = 1
25
- indices = np.arange(H*W).reshape(C,H,W)
26
- while min(H, W) > 1:
27
- indices = indices.reshape(C,H//2,2,W//2,2)
28
- indices = indices.transpose(0,2,4,1,3)
29
- indices = indices.reshape(C*4,H//2, W//2)
30
- H = H//2
31
- W = W//2
32
- C = C*4
33
- assert H == W == 1
34
- idx = torch.tensor(indices.ravel())
35
- self.register_buffer('forward_shuffle_idx',
36
- nn.Parameter(idx, requires_grad=False))
37
- self.register_buffer('backward_shuffle_idx',
38
- nn.Parameter(torch.argsort(idx), requires_grad=False))
39
-
40
- def forward(self, x, reverse=False):
41
- if not reverse:
42
- return x[:, self.forward_shuffle_idx]
43
- else:
44
- return x[:, self.backward_shuffle_idx]
45
-
46
-
47
- def mortonify(i, j):
48
- """(i,j) index to linear morton code"""
49
- i = np.uint64(i)
50
- j = np.uint64(j)
51
-
52
- z = np.uint(0)
53
-
54
- for pos in range(32):
55
- z = (z |
56
- ((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) |
57
- ((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1))
58
- )
59
- return z
60
-
61
-
62
- class ZCurve(AbstractPermuter):
63
- def __init__(self, H, W):
64
- super().__init__()
65
- reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)]
66
- idx = np.argsort(reverseidx)
67
- idx = torch.tensor(idx)
68
- reverseidx = torch.tensor(reverseidx)
69
- self.register_buffer('forward_shuffle_idx',
70
- idx)
71
- self.register_buffer('backward_shuffle_idx',
72
- reverseidx)
73
-
74
- def forward(self, x, reverse=False):
75
- if not reverse:
76
- return x[:, self.forward_shuffle_idx]
77
- else:
78
- return x[:, self.backward_shuffle_idx]
79
-
80
-
81
- class SpiralOut(AbstractPermuter):
82
- def __init__(self, H, W):
83
- super().__init__()
84
- assert H == W
85
- size = W
86
- indices = np.arange(size*size).reshape(size,size)
87
-
88
- i0 = size//2
89
- j0 = size//2-1
90
-
91
- i = i0
92
- j = j0
93
-
94
- idx = [indices[i0, j0]]
95
- step_mult = 0
96
- for c in range(1, size//2+1):
97
- step_mult += 1
98
- # steps left
99
- for k in range(step_mult):
100
- i = i - 1
101
- j = j
102
- idx.append(indices[i, j])
103
-
104
- # step down
105
- for k in range(step_mult):
106
- i = i
107
- j = j + 1
108
- idx.append(indices[i, j])
109
-
110
- step_mult += 1
111
- if c < size//2:
112
- # step right
113
- for k in range(step_mult):
114
- i = i + 1
115
- j = j
116
- idx.append(indices[i, j])
117
-
118
- # step up
119
- for k in range(step_mult):
120
- i = i
121
- j = j - 1
122
- idx.append(indices[i, j])
123
- else:
124
- # end reached
125
- for k in range(step_mult-1):
126
- i = i + 1
127
- idx.append(indices[i, j])
128
-
129
- assert len(idx) == size*size
130
- idx = torch.tensor(idx)
131
- self.register_buffer('forward_shuffle_idx', idx)
132
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
133
-
134
- def forward(self, x, reverse=False):
135
- if not reverse:
136
- return x[:, self.forward_shuffle_idx]
137
- else:
138
- return x[:, self.backward_shuffle_idx]
139
-
140
-
141
- class SpiralIn(AbstractPermuter):
142
- def __init__(self, H, W):
143
- super().__init__()
144
- assert H == W
145
- size = W
146
- indices = np.arange(size*size).reshape(size,size)
147
-
148
- i0 = size//2
149
- j0 = size//2-1
150
-
151
- i = i0
152
- j = j0
153
-
154
- idx = [indices[i0, j0]]
155
- step_mult = 0
156
- for c in range(1, size//2+1):
157
- step_mult += 1
158
- # steps left
159
- for k in range(step_mult):
160
- i = i - 1
161
- j = j
162
- idx.append(indices[i, j])
163
-
164
- # step down
165
- for k in range(step_mult):
166
- i = i
167
- j = j + 1
168
- idx.append(indices[i, j])
169
-
170
- step_mult += 1
171
- if c < size//2:
172
- # step right
173
- for k in range(step_mult):
174
- i = i + 1
175
- j = j
176
- idx.append(indices[i, j])
177
-
178
- # step up
179
- for k in range(step_mult):
180
- i = i
181
- j = j - 1
182
- idx.append(indices[i, j])
183
- else:
184
- # end reached
185
- for k in range(step_mult-1):
186
- i = i + 1
187
- idx.append(indices[i, j])
188
-
189
- assert len(idx) == size*size
190
- idx = idx[::-1]
191
- idx = torch.tensor(idx)
192
- self.register_buffer('forward_shuffle_idx', idx)
193
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
194
-
195
- def forward(self, x, reverse=False):
196
- if not reverse:
197
- return x[:, self.forward_shuffle_idx]
198
- else:
199
- return x[:, self.backward_shuffle_idx]
200
-
201
-
202
- class Random(nn.Module):
203
- def __init__(self, H, W):
204
- super().__init__()
205
- indices = np.random.RandomState(1).permutation(H*W)
206
- idx = torch.tensor(indices.ravel())
207
- self.register_buffer('forward_shuffle_idx', idx)
208
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
209
-
210
- def forward(self, x, reverse=False):
211
- if not reverse:
212
- return x[:, self.forward_shuffle_idx]
213
- else:
214
- return x[:, self.backward_shuffle_idx]
215
-
216
-
217
- class AlternateParsing(AbstractPermuter):
218
- def __init__(self, H, W):
219
- super().__init__()
220
- indices = np.arange(W*H).reshape(H,W)
221
- for i in range(1, H, 2):
222
- indices[i, :] = indices[i, ::-1]
223
- idx = indices.flatten()
224
- assert len(idx) == H*W
225
- idx = torch.tensor(idx)
226
- self.register_buffer('forward_shuffle_idx', idx)
227
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
228
-
229
- def forward(self, x, reverse=False):
230
- if not reverse:
231
- return x[:, self.forward_shuffle_idx]
232
- else:
233
- return x[:, self.backward_shuffle_idx]
234
-
235
-
236
- if __name__ == "__main__":
237
- p0 = AlternateParsing(16, 16)
238
- print(p0.forward_shuffle_idx)
239
- print(p0.backward_shuffle_idx)
240
-
241
- x = torch.randint(0, 768, size=(11, 256))
242
- y = p0(x)
243
- xre = p0(y, reverse=True)
244
- assert torch.equal(x, xre)
245
-
246
- p1 = SpiralOut(2, 2)
247
- print(p1.forward_shuffle_idx)
248
- print(p1.backward_shuffle_idx)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py DELETED
@@ -1,294 +0,0 @@
1
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
2
- from io import StringIO, TextIOWrapper
3
- from unittest import TestCase, main
4
- try:
5
- from contextlib import ExitStack
6
- except ImportError:
7
- # python 2
8
- from contextlib2 import ExitStack
9
-
10
- try:
11
- from unittest.mock import MagicMock, Mock, patch
12
- except ImportError:
13
- from mock import MagicMock, Mock, patch
14
-
15
- from ..ansitowin32 import AnsiToWin32, StreamWrapper
16
- from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING
17
- from .utils import osname
18
-
19
-
20
- class StreamWrapperTest(TestCase):
21
-
22
- def testIsAProxy(self):
23
- mockStream = Mock()
24
- wrapper = StreamWrapper(mockStream, None)
25
- self.assertTrue( wrapper.random_attr is mockStream.random_attr )
26
-
27
- def testDelegatesWrite(self):
28
- mockStream = Mock()
29
- mockConverter = Mock()
30
- wrapper = StreamWrapper(mockStream, mockConverter)
31
- wrapper.write('hello')
32
- self.assertTrue(mockConverter.write.call_args, (('hello',), {}))
33
-
34
- def testDelegatesContext(self):
35
- mockConverter = Mock()
36
- s = StringIO()
37
- with StreamWrapper(s, mockConverter) as fp:
38
- fp.write(u'hello')
39
- self.assertTrue(s.closed)
40
-
41
- def testProxyNoContextManager(self):
42
- mockStream = MagicMock()
43
- mockStream.__enter__.side_effect = AttributeError()
44
- mockConverter = Mock()
45
- with self.assertRaises(AttributeError) as excinfo:
46
- with StreamWrapper(mockStream, mockConverter) as wrapper:
47
- wrapper.write('hello')
48
-
49
- def test_closed_shouldnt_raise_on_closed_stream(self):
50
- stream = StringIO()
51
- stream.close()
52
- wrapper = StreamWrapper(stream, None)
53
- self.assertEqual(wrapper.closed, True)
54
-
55
- def test_closed_shouldnt_raise_on_detached_stream(self):
56
- stream = TextIOWrapper(StringIO())
57
- stream.detach()
58
- wrapper = StreamWrapper(stream, None)
59
- self.assertEqual(wrapper.closed, True)
60
-
61
- class AnsiToWin32Test(TestCase):
62
-
63
- def testInit(self):
64
- mockStdout = Mock()
65
- auto = Mock()
66
- stream = AnsiToWin32(mockStdout, autoreset=auto)
67
- self.assertEqual(stream.wrapped, mockStdout)
68
- self.assertEqual(stream.autoreset, auto)
69
-
70
- @patch('colorama.ansitowin32.winterm', None)
71
- @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
72
- def testStripIsTrueOnWindows(self):
73
- with osname('nt'):
74
- mockStdout = Mock()
75
- stream = AnsiToWin32(mockStdout)
76
- self.assertTrue(stream.strip)
77
-
78
- def testStripIsFalseOffWindows(self):
79
- with osname('posix'):
80
- mockStdout = Mock(closed=False)
81
- stream = AnsiToWin32(mockStdout)
82
- self.assertFalse(stream.strip)
83
-
84
- def testWriteStripsAnsi(self):
85
- mockStdout = Mock()
86
- stream = AnsiToWin32(mockStdout)
87
- stream.wrapped = Mock()
88
- stream.write_and_convert = Mock()
89
- stream.strip = True
90
-
91
- stream.write('abc')
92
-
93
- self.assertFalse(stream.wrapped.write.called)
94
- self.assertEqual(stream.write_and_convert.call_args, (('abc',), {}))
95
-
96
- def testWriteDoesNotStripAnsi(self):
97
- mockStdout = Mock()
98
- stream = AnsiToWin32(mockStdout)
99
- stream.wrapped = Mock()
100
- stream.write_and_convert = Mock()
101
- stream.strip = False
102
- stream.convert = False
103
-
104
- stream.write('abc')
105
-
106
- self.assertFalse(stream.write_and_convert.called)
107
- self.assertEqual(stream.wrapped.write.call_args, (('abc',), {}))
108
-
109
- def assert_autoresets(self, convert, autoreset=True):
110
- stream = AnsiToWin32(Mock())
111
- stream.convert = convert
112
- stream.reset_all = Mock()
113
- stream.autoreset = autoreset
114
- stream.winterm = Mock()
115
-
116
- stream.write('abc')
117
-
118
- self.assertEqual(stream.reset_all.called, autoreset)
119
-
120
- def testWriteAutoresets(self):
121
- self.assert_autoresets(convert=True)
122
- self.assert_autoresets(convert=False)
123
- self.assert_autoresets(convert=True, autoreset=False)
124
- self.assert_autoresets(convert=False, autoreset=False)
125
-
126
- def testWriteAndConvertWritesPlainText(self):
127
- stream = AnsiToWin32(Mock())
128
- stream.write_and_convert( 'abc' )
129
- self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) )
130
-
131
- def testWriteAndConvertStripsAllValidAnsi(self):
132
- stream = AnsiToWin32(Mock())
133
- stream.call_win32 = Mock()
134
- data = [
135
- 'abc\033[mdef',
136
- 'abc\033[0mdef',
137
- 'abc\033[2mdef',
138
- 'abc\033[02mdef',
139
- 'abc\033[002mdef',
140
- 'abc\033[40mdef',
141
- 'abc\033[040mdef',
142
- 'abc\033[0;1mdef',
143
- 'abc\033[40;50mdef',
144
- 'abc\033[50;30;40mdef',
145
- 'abc\033[Adef',
146
- 'abc\033[0Gdef',
147
- 'abc\033[1;20;128Hdef',
148
- ]
149
- for datum in data:
150
- stream.wrapped.write.reset_mock()
151
- stream.write_and_convert( datum )
152
- self.assertEqual(
153
- [args[0] for args in stream.wrapped.write.call_args_list],
154
- [ ('abc',), ('def',) ]
155
- )
156
-
157
- def testWriteAndConvertSkipsEmptySnippets(self):
158
- stream = AnsiToWin32(Mock())
159
- stream.call_win32 = Mock()
160
- stream.write_and_convert( '\033[40m\033[41m' )
161
- self.assertFalse( stream.wrapped.write.called )
162
-
163
- def testWriteAndConvertCallsWin32WithParamsAndCommand(self):
164
- stream = AnsiToWin32(Mock())
165
- stream.convert = True
166
- stream.call_win32 = Mock()
167
- stream.extract_params = Mock(return_value='params')
168
- data = {
169
- 'abc\033[adef': ('a', 'params'),
170
- 'abc\033[;;bdef': ('b', 'params'),
171
- 'abc\033[0cdef': ('c', 'params'),
172
- 'abc\033[;;0;;Gdef': ('G', 'params'),
173
- 'abc\033[1;20;128Hdef': ('H', 'params'),
174
- }
175
- for datum, expected in data.items():
176
- stream.call_win32.reset_mock()
177
- stream.write_and_convert( datum )
178
- self.assertEqual( stream.call_win32.call_args[0], expected )
179
-
180
- def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self):
181
- stream = StringIO()
182
- converter = AnsiToWin32(stream)
183
- stream.close()
184
-
185
- converter.reset_all()
186
-
187
- def test_wrap_shouldnt_raise_on_closed_orig_stdout(self):
188
- stream = StringIO()
189
- stream.close()
190
- with \
191
- patch("colorama.ansitowin32.os.name", "nt"), \
192
- patch("colorama.ansitowin32.winapi_test", lambda: True):
193
- converter = AnsiToWin32(stream)
194
- self.assertTrue(converter.strip)
195
- self.assertFalse(converter.convert)
196
-
197
- def test_wrap_shouldnt_raise_on_missing_closed_attr(self):
198
- with \
199
- patch("colorama.ansitowin32.os.name", "nt"), \
200
- patch("colorama.ansitowin32.winapi_test", lambda: True):
201
- converter = AnsiToWin32(object())
202
- self.assertTrue(converter.strip)
203
- self.assertFalse(converter.convert)
204
-
205
- def testExtractParams(self):
206
- stream = AnsiToWin32(Mock())
207
- data = {
208
- '': (0,),
209
- ';;': (0,),
210
- '2': (2,),
211
- ';;002;;': (2,),
212
- '0;1': (0, 1),
213
- ';;003;;456;;': (3, 456),
214
- '11;22;33;44;55': (11, 22, 33, 44, 55),
215
- }
216
- for datum, expected in data.items():
217
- self.assertEqual(stream.extract_params('m', datum), expected)
218
-
219
- def testCallWin32UsesLookup(self):
220
- listener = Mock()
221
- stream = AnsiToWin32(listener)
222
- stream.win32_calls = {
223
- 1: (lambda *_, **__: listener(11),),
224
- 2: (lambda *_, **__: listener(22),),
225
- 3: (lambda *_, **__: listener(33),),
226
- }
227
- stream.call_win32('m', (3, 1, 99, 2))
228
- self.assertEqual(
229
- [a[0][0] for a in listener.call_args_list],
230
- [33, 11, 22] )
231
-
232
- def test_osc_codes(self):
233
- mockStdout = Mock()
234
- stream = AnsiToWin32(mockStdout, convert=True)
235
- with patch('colorama.ansitowin32.winterm') as winterm:
236
- data = [
237
- '\033]0\x07', # missing arguments
238
- '\033]0;foo\x08', # wrong OSC command
239
- '\033]0;colorama_test_title\x07', # should work
240
- '\033]1;colorama_test_title\x07', # wrong set command
241
- '\033]2;colorama_test_title\x07', # should work
242
- '\033]' + ';' * 64 + '\x08', # see issue #247
243
- ]
244
- for code in data:
245
- stream.write(code)
246
- self.assertEqual(winterm.set_title.call_count, 2)
247
-
248
- def test_native_windows_ansi(self):
249
- with ExitStack() as stack:
250
- def p(a, b):
251
- stack.enter_context(patch(a, b, create=True))
252
- # Pretend to be on Windows
253
- p("colorama.ansitowin32.os.name", "nt")
254
- p("colorama.ansitowin32.winapi_test", lambda: True)
255
- p("colorama.win32.winapi_test", lambda: True)
256
- p("colorama.winterm.win32.windll", "non-None")
257
- p("colorama.winterm.get_osfhandle", lambda _: 1234)
258
-
259
- # Pretend that our mock stream has native ANSI support
260
- p(
261
- "colorama.winterm.win32.GetConsoleMode",
262
- lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING,
263
- )
264
- SetConsoleMode = Mock()
265
- p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
266
-
267
- stdout = Mock()
268
- stdout.closed = False
269
- stdout.isatty.return_value = True
270
- stdout.fileno.return_value = 1
271
-
272
- # Our fake console says it has native vt support, so AnsiToWin32 should
273
- # enable that support and do nothing else.
274
- stream = AnsiToWin32(stdout)
275
- SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
276
- self.assertFalse(stream.strip)
277
- self.assertFalse(stream.convert)
278
- self.assertFalse(stream.should_wrap())
279
-
280
- # Now let's pretend we're on an old Windows console, that doesn't have
281
- # native ANSI support.
282
- p("colorama.winterm.win32.GetConsoleMode", lambda _: 0)
283
- SetConsoleMode = Mock()
284
- p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
285
-
286
- stream = AnsiToWin32(stdout)
287
- SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
288
- self.assertTrue(stream.strip)
289
- self.assertTrue(stream.convert)
290
- self.assertTrue(stream.should_wrap())
291
-
292
-
293
- if __name__ == '__main__':
294
- main()
 
spaces/Brasd99/SquadDetective/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: SquadDetective
3
- emoji: 👁️
4
- colorFrom: purple
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CAMP-ViL/Xplainer/utils.py DELETED
@@ -1,40 +0,0 @@
1
- from math import log, exp
2
-
3
- import numpy as np
4
- from sklearn.metrics import roc_auc_score
5
-
6
-
7
- def cos_sim_to_prob(sim):
8
- return (sim + 1) / 2 # linear transformation to 0 and 1
9
-
10
-
11
- def log_prob_to_prob(log_prob):
12
- return exp(log_prob)
13
-
14
-
15
- def prob_to_log_prob(prob):
16
- return log(prob)
17
-
18
-
19
- def calculate_auroc(all_disease_probs, gt_diseases):
20
- '''
21
- Calculates the AUROC (Area Under the Receiver Operating Characteristic curve) for multiple diseases.
22
-
23
- Parameters:
24
- all_disease_probs (numpy array): predicted disease labels, a multi-hot vector of shape (N_samples, 14)
25
- gt_diseases (numpy array): ground truth disease labels, a multi-hot vector of shape (N_samples, 14)
26
-
27
- Returns:
28
- overall_auroc (float): the overall AUROC score
29
- per_disease_auroc (numpy array): an array of shape (14,) containing the AUROC score for each disease
30
- '''
31
-
32
- per_disease_auroc = np.zeros((gt_diseases.shape[1],)) # num of diseases
33
- for i in range(gt_diseases.shape[1]):
34
- # Compute the AUROC score for each disease
35
- per_disease_auroc[i] = roc_auc_score(gt_diseases[:, i], all_disease_probs[:, i])
36
-
37
- # Compute the overall AUROC score
38
- overall_auroc = roc_auc_score(gt_diseases, all_disease_probs, average='macro')
39
-
40
- return overall_auroc, per_disease_auroc
 
spaces/CVPR/LIVE/thrust/thrust/detail/function.h DELETED
@@ -1,160 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/raw_reference_cast.h>
21
-
22
- namespace thrust
23
- {
24
- namespace detail
25
- {
26
-
27
- template <typename Function, typename Result>
28
- struct wrapped_function
29
- {
30
- // mutable because Function::operator() might be const
31
- mutable Function m_f;
32
-
33
- inline __host__ __device__
34
- wrapped_function()
35
- : m_f()
36
- {}
37
-
38
- inline __host__ __device__
39
- wrapped_function(const Function& f)
40
- : m_f(f)
41
- {}
42
-
43
- __thrust_exec_check_disable__
44
- template <typename Argument>
45
- inline __host__ __device__
46
- Result operator()(Argument& x) const
47
- {
48
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x)));
49
- }
50
-
51
- __thrust_exec_check_disable__
52
- template <typename Argument>
53
- inline __host__ __device__
54
- Result operator()(const Argument& x) const
55
- {
56
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x)));
57
- }
58
-
59
- __thrust_exec_check_disable__
60
- template <typename Argument1, typename Argument2>
61
- inline __host__ __device__
62
- Result operator()(Argument1& x, Argument2& y) const
63
- {
64
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x),
65
- thrust::raw_reference_cast(y)));
66
- }
67
-
68
- __thrust_exec_check_disable__
69
- template <typename Argument1, typename Argument2>
70
- inline __host__ __device__
71
- Result operator()(const Argument1& x, Argument2& y) const
72
- {
73
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x),
74
- thrust::raw_reference_cast(y)));
75
- }
76
-
77
- __thrust_exec_check_disable__
78
- template <typename Argument1, typename Argument2>
79
- inline __host__ __device__
80
- Result operator()(const Argument1& x, const Argument2& y) const
81
- {
82
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x),
83
- thrust::raw_reference_cast(y)));
84
- }
85
-
86
- __thrust_exec_check_disable__
87
- template <typename Argument1, typename Argument2>
88
- inline __host__ __device__
89
- Result operator()(Argument1& x, const Argument2& y) const
90
- {
91
- return static_cast<Result>(m_f(thrust::raw_reference_cast(x),
92
- thrust::raw_reference_cast(y)));
93
- }
94
- }; // end wrapped_function
95
-
96
- // Specialize for void return types:
97
- template <typename Function>
98
- struct wrapped_function<Function, void>
99
- {
100
- // mutable because Function::operator() might be const
101
- mutable Function m_f;
102
- inline __host__ __device__
103
- wrapped_function()
104
- : m_f()
105
- {}
106
-
107
- inline __host__ __device__
108
- wrapped_function(const Function& f)
109
- : m_f(f)
110
- {}
111
-
112
- __thrust_exec_check_disable__
113
- template <typename Argument>
114
- inline __host__ __device__
115
- void operator()(Argument& x) const
116
- {
117
- m_f(thrust::raw_reference_cast(x));
118
- }
119
-
120
- __thrust_exec_check_disable__
121
- template <typename Argument>
122
- inline __host__ __device__
123
- void operator()(const Argument& x) const
124
- {
125
- m_f(thrust::raw_reference_cast(x));
126
- }
127
-
128
- __thrust_exec_check_disable__
129
- template <typename Argument1, typename Argument2>
130
- inline __host__ __device__
131
- void operator()(Argument1& x, Argument2& y) const
132
- {
133
- m_f(thrust::raw_reference_cast(x), thrust::raw_reference_cast(y));
134
- }
135
-
136
- __thrust_exec_check_disable__
137
- template <typename Argument1, typename Argument2>
138
- inline __host__ __device__
139
- void operator()(const Argument1& x, Argument2& y) const
140
- {
141
- m_f(thrust::raw_reference_cast(x), thrust::raw_reference_cast(y));
142
- }
143
- __thrust_exec_check_disable__
144
- template <typename Argument1, typename Argument2>
145
- inline __host__ __device__
146
- void operator()(const Argument1& x, const Argument2& y) const
147
- {
148
- m_f(thrust::raw_reference_cast(x), thrust::raw_reference_cast(y));
149
- }
150
- __thrust_exec_check_disable__
151
- template <typename Argument1, typename Argument2>
152
- inline __host__ __device__
153
- void operator()(Argument1& x, const Argument2& y) const
154
- {
155
- m_f(thrust::raw_reference_cast(x), thrust::raw_reference_cast(y));
156
- }
157
- }; // end wrapped_function
158
-
159
- } // namespace detail
160
- } // namespace thrust
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/uninitialized_fill.h DELETED
@@ -1,22 +0,0 @@
- /*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system has no special version of this algorithm
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/internal/copy_cross_system.h DELETED
@@ -1,242 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
- // XXX
30
- // this file must not be included on its own, ever,
31
- // but must be part of include in thrust/system/cuda/detail/copy.h
32
-
33
- #include <thrust/system/cuda/config.h>
34
-
35
- #include <thrust/distance.h>
36
- #include <thrust/advance.h>
37
- #include <thrust/detail/raw_pointer_cast.h>
38
- #include <thrust/system/cuda/detail/uninitialized_copy.h>
39
- #include <thrust/system/cuda/detail/util.h>
40
- #include <thrust/detail/temporary_array.h>
41
- #include <thrust/type_traits/is_trivially_relocatable.h>
42
-
43
- namespace thrust
44
- {
45
- namespace cuda_cub {
46
-
47
- namespace __copy {
48
-
49
-
50
- template <class H,
51
- class D,
52
- class T,
53
- class Size>
54
- THRUST_HOST_FUNCTION void
55
- trivial_device_copy(thrust::cpp::execution_policy<H>& ,
56
- thrust::cuda_cub::execution_policy<D>& device_s,
57
- T* dst,
58
- T const* src,
59
- Size count)
60
- {
61
- cudaError status;
62
- status = cuda_cub::trivial_copy_to_device(dst,
63
- src,
64
- count,
65
- cuda_cub::stream(device_s));
66
- cuda_cub::throw_on_error(status, "__copy::trivial_device_copy H->D: failed");
67
- }
68
-
69
- template <class D,
70
- class H,
71
- class T,
72
- class Size>
73
- THRUST_HOST_FUNCTION void
74
- trivial_device_copy(thrust::cuda_cub::execution_policy<D>& device_s,
75
- thrust::cpp::execution_policy<H>& ,
76
- T* dst,
77
- T const* src,
78
- Size count)
79
- {
80
- cudaError status;
81
- status = cuda_cub::trivial_copy_from_device(dst,
82
- src,
83
- count,
84
- cuda_cub::stream(device_s));
85
- cuda_cub::throw_on_error(status, "trivial_device_copy D->H failed");
86
- }
87
-
88
- template <class System1,
89
- class System2,
90
- class InputIt,
91
- class Size,
92
- class OutputIt>
93
- OutputIt __host__
94
- cross_system_copy_n(thrust::execution_policy<System1>& sys1,
95
- thrust::execution_policy<System2>& sys2,
96
- InputIt begin,
97
- Size n,
98
- OutputIt result,
99
- thrust::detail::true_type) // trivial copy
100
-
101
- {
102
- typedef typename iterator_traits<InputIt>::value_type InputTy;
103
-
104
- trivial_device_copy(derived_cast(sys1),
105
- derived_cast(sys2),
106
- reinterpret_cast<InputTy*>(thrust::raw_pointer_cast(&*result)),
107
- reinterpret_cast<InputTy const*>(thrust::raw_pointer_cast(&*begin)),
108
- n);
109
-
110
- return result + n;
111
- }
112
-
113
- // non-trivial H->D copy
114
- template <class H,
115
- class D,
116
- class InputIt,
117
- class Size,
118
- class OutputIt>
119
- OutputIt __host__
120
- cross_system_copy_n(thrust::cpp::execution_policy<H>& host_s,
121
- thrust::cuda_cub::execution_policy<D>& device_s,
122
- InputIt first,
123
- Size num_items,
124
- OutputIt result,
125
- thrust::detail::false_type) // non-trivial copy
126
- {
127
- // get type of the input data
128
- typedef typename thrust::iterator_value<InputIt>::type InputTy;
129
-
130
- // copy input data into host temp storage
131
- InputIt last = first;
132
- thrust::advance(last, num_items);
133
- thrust::detail::temporary_array<InputTy, H> temp(host_s, num_items);
134
-
135
- for (Size idx = 0; idx != num_items; idx++)
136
- {
137
- ::new (static_cast<void*>(temp.data().get()+idx)) InputTy(*first);
138
- ++first;
139
- }
140
-
141
- // allocate device temporary storage
142
- thrust::detail::temporary_array<InputTy, D> d_in_ptr(device_s, num_items);
143
-
144
- // trivial copy data from host to device
145
- cudaError status = cuda_cub::trivial_copy_to_device(d_in_ptr.data().get(),
146
- temp.data().get(),
147
- num_items,
148
- cuda_cub::stream(device_s));
149
- cuda_cub::throw_on_error(status, "__copy:: H->D: failed");
150
-
151
-
152
- // device->device copy
153
- OutputIt ret = cuda_cub::copy_n(device_s, d_in_ptr.data(), num_items, result);
154
-
155
- return ret;
156
- }
157
-
158
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
159
- // non-trivial copy D->H, only supported with NVCC compiler
160
- // because copy ctor must have __device__ annotations, which is nvcc-only
161
- // feature
162
- template <class D,
163
- class H,
164
- class InputIt,
165
- class Size,
166
- class OutputIt>
167
- OutputIt __host__
168
- cross_system_copy_n(thrust::cuda_cub::execution_policy<D>& device_s,
169
- thrust::cpp::execution_policy<H>& host_s,
170
- InputIt first,
171
- Size num_items,
172
- OutputIt result,
173
- thrust::detail::false_type) // non-trivial copy
174
-
175
- {
176
- // get type of the input data
177
- typedef typename thrust::iterator_value<InputIt>::type InputTy;
178
-
179
- // allocate device temp storage
180
- thrust::detail::temporary_array<InputTy, D> d_in_ptr(device_s, num_items);
181
-
182
- // uninitialized copy into temp device storage
183
- cuda_cub::uninitialized_copy_n(device_s, first, num_items, d_in_ptr.data());
184
-
185
- // allocate host temp storage
186
- thrust::detail::temporary_array<InputTy, H> temp(host_s, num_items);
187
-
188
- // trivial copy from device to host
189
- cudaError status;
190
- status = cuda_cub::trivial_copy_from_device(temp.data().get(),
191
- d_in_ptr.data().get(),
192
- num_items,
193
- cuda_cub::stream(device_s));
194
- cuda_cub::throw_on_error(status, "__copy:: D->H: failed");
195
-
196
- // host->host copy
197
- OutputIt ret = thrust::copy_n(host_s, temp.data(), num_items, result);
198
-
199
- return ret;
200
- }
201
- #endif
202
-
203
- template <class System1,
204
- class System2,
205
- class InputIt,
206
- class Size,
207
- class OutputIt>
208
- OutputIt __host__
209
- cross_system_copy_n(cross_system<System1, System2> systems,
210
- InputIt begin,
211
- Size n,
212
- OutputIt result)
213
- {
214
- return cross_system_copy_n(
215
- derived_cast(systems.sys1),
216
- derived_cast(systems.sys2),
217
- begin,
218
- n,
219
- result,
220
- typename is_indirectly_trivially_relocatable_to<InputIt, OutputIt>::type());
221
- }
222
-
223
- template <class System1,
224
- class System2,
225
- class InputIterator,
226
- class OutputIterator>
227
- OutputIterator __host__
228
- cross_system_copy(cross_system<System1, System2> systems,
229
- InputIterator begin,
230
- InputIterator end,
231
- OutputIterator result)
232
- {
233
- return cross_system_copy_n(systems,
234
- begin,
235
- thrust::distance(begin, end),
236
- result);
237
- }
238
-
239
- } // namespace __copy
240
-
241
- } // namespace cuda_cub
242
- } // end namespace thrust
 
spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/htc_mask_head.py DELETED
@@ -1,43 +0,0 @@
- from mmcv.cnn import ConvModule
-
- from mmdet.models.builder import HEADS
- from .fcn_mask_head import FCNMaskHead
-
-
- @HEADS.register_module()
- class HTCMaskHead(FCNMaskHead):
-
-     def __init__(self, with_conv_res=True, *args, **kwargs):
-         super(HTCMaskHead, self).__init__(*args, **kwargs)
-         self.with_conv_res = with_conv_res
-         if self.with_conv_res:
-             self.conv_res = ConvModule(
-                 self.conv_out_channels,
-                 self.conv_out_channels,
-                 1,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-
-     def init_weights(self):
-         super(HTCMaskHead, self).init_weights()
-         if self.with_conv_res:
-             self.conv_res.init_weights()
-
-     def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
-         if res_feat is not None:
-             assert self.with_conv_res
-             res_feat = self.conv_res(res_feat)
-             x = x + res_feat
-         for conv in self.convs:
-             x = conv(x)
-         res_feat = x
-         outs = []
-         if return_logits:
-             x = self.upsample(x)
-             if self.upsample_method == 'deconv':
-                 x = self.relu(x)
-             mask_pred = self.conv_logits(x)
-             outs.append(mask_pred)
-         if return_feat:
-             outs.append(res_feat)
-         return outs if len(outs) > 1 else outs[0]
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/bot.js DELETED
@@ -1,361 +0,0 @@
1
- import { makeSendMsg, makeMessage } from './message.js'
2
- import { setMsgMap, getMsgMap } from '../msgMap.js'
3
- import { roleMap } from './tool.js'
4
-
5
- export class QQRedBot {
6
- constructor(bot) {
7
- this.bot = bot
8
- this.self_id = bot.self_id
9
- this.nickname = bot.nickname
10
- this.adapter = {
11
- id: "QQ",
12
- name: "chronocat"
13
- }
14
- this.avatar = `https://q1.qlogo.cn/g?b=qq&s=0&nk=${bot.uin}`
15
- this.ws = bot.ws
16
- this.sendApi = bot.sendApi
17
- this.uin = bot.self_id
18
- this.uid = bot.info.uid
19
- this.nickname = bot.nickname
20
- this.self_id = bot.self_id
21
- this.stat = {
22
- start_time: Date.now() / 1000,
23
- recv_msg_cnt: 0
24
- }
25
- this.version = {
26
- id: "QQ",
27
- name: "chronocat"
28
- }
29
- this.fl = new Map()
30
- this.gl = new Map()
31
- this.gml = new Map()
32
- this.getConfig = {}
33
- this.init()
34
- }
35
-
36
- async init() {
37
- await this.getFriendList()
38
- await this.getGroupList()
39
- }
40
-
41
- pickGroup(group_id) {
42
- if (!this.getConfig[group_id]) {
43
- this.getGroupMemberList(group_id)
44
- this.getConfig[group_id] = true
45
- }
46
- const i = {
47
- ...this.gl.get(Number(group_id)),
48
- self_id: this.uin,
49
- bot: this.bot,
50
- group_id
51
- }
52
- return {
53
- ...i,
54
- sendMsg: async (msg) => await this.sendGroupMsg(group_id, msg),
55
- pickMember: user_id => this.pickMember(group_id, user_id),
56
- getMemberMap: async () => await this.getGroupMemberList(group_id),
57
- recallMsg: async message_id => await this.deleteMsg(message_id),
58
- sendFile: async file => await this.sendGroupMsg(group_id, [{ type: 'file', file }]),
59
- getChatHistory: async (seq, count) => await this.getChatHistory(seq, count, 'group', group_id),
60
- getInfo: async () => await this.getGroupInfo(group_id),
61
- muteMember: async (user_id, duration) => await this.setGroupBan(group_id, user_id, duration),
62
- muteAll: async (enable) => await this.setGroupWholeBan(group_id, enable),
63
- kickMember: async (user_id, message, block) => await this.setGroupKick(group_id, user_id, false, message),
64
- makeForwardMsg: (msg) => { return { type: "node", data: msg } }
65
- }
66
- }
67
-
68
- pickFriend(user_id) {
69
- const user = this.fl.get(Number(user_id))
70
- const i = {
71
- ...user,
72
- self_id: this.uin,
73
- bot: this.bot,
74
- user_id,
75
- }
76
- const chatType = user?.isGroupMsg ? 100 : 1
77
- return {
78
- ...i,
79
- sendMsg: async msg => await this.sendPrivateMsg(user_id, msg, chatType),
80
- recallMsg: async message_id => await this.deleteMsg(message_id),
81
- sendFile: async file => await this.sendPrivateMsg(user_id, [{ type: 'file', file }], chatType),
82
- getChatHistory: async (time, count) => await this.getChatHistory(time, count, 'friend', user_id),
83
- getFileUrl: async (fid) => `http://127.0.0.1:${Bot.server.address().port}/ws-plugin?file=${fid}`,
84
- makeForwardMsg: (msg) => { return { type: "node", data: msg } }
85
- }
86
- }
87
-
88
- pickMember(group_id, user_id) {
89
- if (!this.getConfig[group_id]) {
90
- this.getGroupMemberList(group_id)
91
- this.getConfig[group_id] = true
92
- }
93
- const info = this.gml.get(Number(group_id))?.get?.(Number(user_id))
94
- const i = {
95
- ...info,
96
- self_id: this.uin,
97
- bot: this.bot,
98
- group_id: group_id,
99
- user_id: user_id,
100
- }
101
- return {
102
- ...i,
103
- info,
104
- ...this.pickFriend(user_id),
105
- kick: async (message, block) => await this.setGroupKick(group_id, user_id, false, message),
106
- mute: async (duration) => await this.setGroupBan(group_id, user_id, duration),
107
- getInfo: async () => await this.getGroupMemberInfo(group_id, user_id),
108
- getAvatarUrl: () => `https://q1.qlogo.cn/g?b=qq&s=0&nk=${user_id}`
109
- }
110
- }
111
-
112
- pickUser(user_id) {
113
- return this.pickFriend(user_id)
114
- }
115
-
116
- async sendGroupMsg(group_id, message) {
117
- const data = {
118
- bot: this.bot,
119
- self_id: this.uin,
120
- group_id
121
- }
122
- const { msg: elements, log, message_id: id, rand, seq, time } = await makeSendMsg(data, message)
123
- if (id) return { message_id: id, rand, seq, time }
124
- const result = await this.bot.sendApi('POST', 'message/send', JSON.stringify({
125
- peer: {
126
- chatType: 2,
127
- peerUin: String(group_id)
128
- },
129
- elements
130
- }))
131
- if (result.error) {
132
- throw result.error
133
- } else {
134
- logger.info(`${logger.blue(`[${this.uin} => ${group_id}]`)} 发送群消息:${log}`)
135
- }
136
- const sendRet = {
137
- message_id: result.msgId,
138
- seq: Number(result.msgSeq),
139
- rand: Number(result.msgRandom),
140
- time: Number(result.msgTime),
141
- group_id: Number(group_id),
142
- onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
143
- }
144
- setMsgMap(sendRet)
145
- sendRet.md5 = elements.filter((i) => i.elementType === 2)
146
- return sendRet
147
- }
148
-
149
- async sendPrivateMsg(user_id, message, chatType = 1) {
150
- if ([1, 100].indexOf(chatType) == -1) chatType = 1
151
- const data = {
152
- bot: this.bot,
153
- self_id: this.uin,
154
- user_id
155
- }
156
- const { msg: elements, log, message_id: id, rand, seq, time } = await makeSendMsg(data, message)
157
- if (id) return { message_id: id, rand, seq, time }
158
- const result = await this.bot.sendApi('POST', 'message/send', JSON.stringify({
159
- peer: {
160
- chatType,
161
- peerUin: String(user_id)
162
- },
163
- elements
164
- }))
165
- if (result.error) {
166
- throw result.error
167
- } else {
168
- logger.info(`${logger.blue(`[${this.uin} => ${user_id}]`)} 发送好友消息:${log}`)
169
- }
170
- const sendRet = {
171
- message_id: result.msgId,
172
- seq: Number(result.msgSeq),
173
- rand: Number(result.msgRandom),
174
- user_id: Number(user_id),
175
- time: Number(result.msgTime),
176
- onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
177
- }
178
- setMsgMap(sendRet)
179
- sendRet.md5 = elements.filter((i) => i.elementType === 2)
180
- return sendRet
181
- }
182
-
183
- async getMsg(message_id) {
184
- const retult = await this.getChatHistory(message_id, 1)
185
- if (retult.length > 0) {
186
- return retult[0]
187
- } else {
188
- return null
189
- }
190
- }
191
-
192
- async deleteMsg(message_id) {
193
- const msg = await getMsgMap({ message_id })
194
- if (msg) {
195
- this.bot.sendApi('POST', 'message/recall', JSON.stringify({
196
- peer: {
197
- chatType: msg.group_id ? 2 : 1,
198
- peerUin: String(msg.group_id || msg.user_id),
199
- guildId: null
200
- },
201
- msgIds: [msg.message_id]
202
- }))
203
- }
204
- }
205
-
206
- async getChatHistory(message_id, count, target, target_id) {
207
- let data = {}
208
- if (target === 'group') {
209
- if (!message_id) message_id = (await getMsgMap({ group_id: target_id }, [['seq', 'DESC']])).seq
210
- data = {
211
- seq: message_id,
212
- group_id: target_id,
213
- }
214
- } else if (target === 'friend') {
215
- if (!message_id) message_id = (await getMsgMap({ user_id: target_id }, [['time', 'DESC']])).time
216
- data = {
217
- time: message_id,
218
- user_id: target_id,
219
- }
220
- } else {
221
- data = {
222
- message_id,
223
- }
224
- }
225
- const msg = await getMsgMap(data)
226
- if (msg) {
227
- const result = await this.bot.sendApi('POST', 'message/getHistory', JSON.stringify({
228
- peer: {
229
- chatType: msg.group_id ? 2 : 1,
230
- peerUin: String(msg.group_id || msg.user_id),
231
- guildId: null
232
- },
233
- offsetMsgId: msg.message_id,
234
- count: count || 20
235
- }))
236
- if (result.error) {
237
- throw result.error
238
- }
239
- if (result.msgList) {
240
- const msgList = []
241
- for (const i of result.msgList) {
242
- const message = await makeMessage(this.uin, i)
243
- if (message.bot) delete message.bot
244
- msgList.push(message)
245
- }
246
- return msgList
247
- }
248
- }
249
- return []
250
- }
251
-
252
- async getFriendList() {
253
- for (const i of (await this.bot.sendApi('get', 'bot/friends')) || []) {
254
- this.fl.set(Number(i.uin), {
255
- ...i,
256
- bot_id: this.uin,
257
- user_id: i.uin,
258
- nickname: i.nick
259
- })
260
- }
261
- return this.fl
262
- }
263
-
264
- async getGroupList() {
265
- for (const i of (await this.bot.sendApi('get', 'bot/groups')) || []) {
266
- const data = {
267
- ...i,
268
- bot_id: this.uin,
269
- group_id: i.groupCode,
270
- group_name: i.groupName,
271
- max_member_count: i.maxMember,
272
- member_count: i.memberCount,
273
- }
274
- switch (i.memberRole) {
275
- case 3:
276
- data.is_admin = true
277
- break
278
- case 4:
279
- data.is_owner = true
280
- break
281
- default:
282
- break;
283
- }
284
- this.gl.set(Number(i.groupCode), data)
285
- if (!this.gml.has(Number(i.groupCode))) {
286
- this.gml.set(Number(i.groupCode), new Map())
287
- }
288
- }
289
- return this.gl
290
- }
291
-
292
- async getGroupMemberList(group_id) {
293
- const body = {
294
- group: Number(group_id),
295
- size: 9999
296
- }
297
- const memberList = await this.bot.sendApi('POST', 'group/getMemberList', JSON.stringify(body))
298
- if (memberList.error) throw memberList.error
299
- for (const i of memberList) {
300
- this.gml.get(Number(group_id)).set(Number(i.detail.uin), {
301
- ...i.detail,
302
- card: i.detail.cardName || i.detail.nick,
303
- nickname: i.detail.nick,
304
- group_id,
305
- user_id: i.detail.uin,
306
- role: roleMap[i.detail.role],
307
- shutup_time: i.detail.shutUpTime,
308
- sex: 'unknown'
309
- })
310
- }
311
- return this.gml.get(Number(group_id))
312
- }
313
-
314
- async getGroupMemberInfo(group_id, user_id) {
315
- if (!this.getConfig[group_id]) {
316
- await this.getGroupMemberList(group_id)
317
- this.getConfig[group_id] = true
318
- }
319
- return this.gl.get(Number(group_id))?.get?.(Number(user_id)) || {}
320
- }
321
-
322
- async getGroupInfo(group_id) {
323
- return this.gl.get(Number(group_id))
324
- }
325
-
326
- async setGroupBan(group_id, user_id, duration) {
327
- const result = this.bot.sendApi('POST', 'group/muteMember', JSON.stringify({
328
- group: String(group_id),
329
- memList: [{
330
- uin: String(user_id),
331
- timeStamp: duration
332
- }]
333
- }))
334
- if (result.error) {
335
- throw result.error
336
- }
337
- }
338
-
339
- async setGroupWholeBan(group_id, enable = true) {
340
- const result = this.bot.sendApi('POST', 'group/muteEveryone', JSON.stringify({
341
- group: String(group_id),
342
- enable
343
- }))
344
- if (result.error) {
345
- throw result.error
346
- }
347
- }
348
-
349
- async setGroupKick(group_id, user_id, reject_add_request = false, message = '') {
350
- const result = this.bot.sendApi('POST', 'group/kick', JSON.stringify({
351
- uidList: [String(user_id)],
352
- group: String(group_id),
353
- refuseForever: reject_add_request,
354
- reason: message
355
- }))
356
- if (result.error) {
357
- throw result.error
358
- }
359
- return true
360
- }
361
- }
 
spaces/Cosmopolitan/stabilityai-stable-diffusion-2-1/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Stabilityai Stable Diffusion 2 1
- emoji: 🌍
- colorFrom: blue
- colorTo: green
- sdk: gradio
- sdk_version: 3.24.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DHEIVER/Segmento_de_Angio_Coronariana_v3/preprocess.py DELETED
@@ -1,13 +0,0 @@
- import cv2
- import numpy as np
-
- def unsharp_masking(img, kernel_size=5, threshold=2.0):
-     if kernel_size % 2 == 0:
-         kernel_size += 1  # Ensure the kernel size is odd
-     gaussian = cv2.GaussianBlur(img, (kernel_size, kernel_size), 2.0)
-     unsharp_mask = cv2.addWeighted(img, threshold, gaussian, -1.0, 0)
-     # Clip the pixel values to the valid range [0, 255]
-     unsharp_mask = np.clip(unsharp_mask, 0, 255)
-     # Normalize the image to bring pixel values back to [0, 255]
-     cv2.normalize(unsharp_mask, unsharp_mask, 0, 255, cv2.NORM_MINMAX)
-     return unsharp_mask
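Note: the short sketch below is not part of the deleted file; it only illustrates how the `unsharp_masking` helper above would typically be called, assuming it is importable from this Space's `preprocess` module. The image path is a placeholder.

```python
import cv2
import numpy as np

from preprocess import unsharp_masking  # the helper shown in the diff above

# "angio_frame.png" is a placeholder path, not a file shipped with this Space.
img = cv2.imread("angio_frame.png")
if img is None:
    raise FileNotFoundError("angio_frame.png")

sharpened = unsharp_masking(img, kernel_size=5, threshold=2.0)
cv2.imwrite("angio_frame_sharpened.png", np.clip(sharpened, 0, 255).astype(np.uint8))
```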
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/cli.py DELETED
@@ -1,198 +0,0 @@
1
- import os
2
- import argparse
3
- import logging
4
- import shutil
5
- import multiprocessing as mp
6
- from contextlib import closing
7
- from functools import partial
8
-
9
- import fontTools
10
- from .ufo import font_to_quadratic, fonts_to_quadratic
11
-
12
- ufo_module = None
13
- try:
14
- import ufoLib2 as ufo_module
15
- except ImportError:
16
- try:
17
- import defcon as ufo_module
18
- except ImportError as e:
19
- pass
20
-
21
-
22
- logger = logging.getLogger("fontTools.cu2qu")
23
-
24
-
25
- def _cpu_count():
26
- try:
27
- return mp.cpu_count()
28
- except NotImplementedError: # pragma: no cover
29
- return 1
30
-
31
-
32
- def open_ufo(path):
33
- if hasattr(ufo_module.Font, "open"): # ufoLib2
34
- return ufo_module.Font.open(path)
35
- return ufo_module.Font(path) # defcon
36
-
37
-
38
- def _font_to_quadratic(input_path, output_path=None, **kwargs):
39
- ufo = open_ufo(input_path)
40
- logger.info("Converting curves for %s", input_path)
41
- if font_to_quadratic(ufo, **kwargs):
42
- logger.info("Saving %s", output_path)
43
- if output_path:
44
- ufo.save(output_path)
45
- else:
46
- ufo.save() # save in-place
47
- elif output_path:
48
- _copytree(input_path, output_path)
49
-
50
-
51
- def _samepath(path1, path2):
52
- # TODO on python3+, there's os.path.samefile
53
- path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
54
- path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
55
- return path1 == path2
56
-
57
-
58
- def _copytree(input_path, output_path):
59
- if _samepath(input_path, output_path):
60
- logger.debug("input and output paths are the same file; skipped copy")
61
- return
62
- if os.path.exists(output_path):
63
- shutil.rmtree(output_path)
64
- shutil.copytree(input_path, output_path)
65
-
66
-
67
- def main(args=None):
68
- """Convert a UFO font from cubic to quadratic curves"""
69
- parser = argparse.ArgumentParser(prog="cu2qu")
70
- parser.add_argument("--version", action="version", version=fontTools.__version__)
71
- parser.add_argument(
72
- "infiles",
73
- nargs="+",
74
- metavar="INPUT",
75
- help="one or more input UFO source file(s).",
76
- )
77
- parser.add_argument("-v", "--verbose", action="count", default=0)
78
- parser.add_argument(
79
- "-e",
80
- "--conversion-error",
81
- type=float,
82
- metavar="ERROR",
83
- default=None,
84
- help="maxiumum approximation error measured in EM (default: 0.001)",
85
- )
86
- parser.add_argument(
87
- "-m",
88
- "--mixed",
89
- default=False,
90
- action="store_true",
91
- help="whether to used mixed quadratic and cubic curves",
92
- )
93
- parser.add_argument(
94
- "--keep-direction",
95
- dest="reverse_direction",
96
- action="store_false",
97
- help="do not reverse the contour direction",
98
- )
99
-
100
- mode_parser = parser.add_mutually_exclusive_group()
101
- mode_parser.add_argument(
102
- "-i",
103
- "--interpolatable",
104
- action="store_true",
105
- help="whether curve conversion should keep interpolation compatibility",
106
- )
107
- mode_parser.add_argument(
108
- "-j",
109
- "--jobs",
110
- type=int,
111
- nargs="?",
112
- default=1,
113
- const=_cpu_count(),
114
- metavar="N",
115
- help="Convert using N multiple processes (default: %(default)s)",
116
- )
117
-
118
- output_parser = parser.add_mutually_exclusive_group()
119
- output_parser.add_argument(
120
- "-o",
121
- "--output-file",
122
- default=None,
123
- metavar="OUTPUT",
124
- help=(
125
- "output filename for the converted UFO. By default fonts are "
126
- "modified in place. This only works with a single input."
127
- ),
128
- )
129
- output_parser.add_argument(
130
- "-d",
131
- "--output-dir",
132
- default=None,
133
- metavar="DIRECTORY",
134
- help="output directory where to save converted UFOs",
135
- )
136
-
137
- options = parser.parse_args(args)
138
-
139
- if ufo_module is None:
140
- parser.error("Either ufoLib2 or defcon are required to run this script.")
141
-
142
- if not options.verbose:
143
- level = "WARNING"
144
- elif options.verbose == 1:
145
- level = "INFO"
146
- else:
147
- level = "DEBUG"
148
- logging.basicConfig(level=level)
149
-
150
- if len(options.infiles) > 1 and options.output_file:
151
- parser.error("-o/--output-file can't be used with multile inputs")
152
-
153
- if options.output_dir:
154
- output_dir = options.output_dir
155
- if not os.path.exists(output_dir):
156
- os.mkdir(output_dir)
157
- elif not os.path.isdir(output_dir):
158
- parser.error("'%s' is not a directory" % output_dir)
159
- output_paths = [
160
- os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
161
- ]
162
- elif options.output_file:
163
- output_paths = [options.output_file]
164
- else:
165
- # save in-place
166
- output_paths = [None] * len(options.infiles)
167
-
168
- kwargs = dict(
169
- dump_stats=options.verbose > 0,
170
- max_err_em=options.conversion_error,
171
- reverse_direction=options.reverse_direction,
172
- all_quadratic=False if options.mixed else True,
173
- )
174
-
175
- if options.interpolatable:
176
- logger.info("Converting curves compatibly")
177
- ufos = [open_ufo(infile) for infile in options.infiles]
178
- if fonts_to_quadratic(ufos, **kwargs):
179
- for ufo, output_path in zip(ufos, output_paths):
180
- logger.info("Saving %s", output_path)
181
- if output_path:
182
- ufo.save(output_path)
183
- else:
184
- ufo.save()
185
- else:
186
- for input_path, output_path in zip(options.infiles, output_paths):
187
- if output_path:
188
- _copytree(input_path, output_path)
189
- else:
190
- jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
191
- if jobs > 1:
192
- func = partial(_font_to_quadratic, **kwargs)
193
- logger.info("Running %d parallel processes", jobs)
194
- with closing(mp.Pool(jobs)) as pool:
195
- pool.starmap(func, zip(options.infiles, output_paths))
196
- else:
197
- for input_path, output_path in zip(options.infiles, output_paths):
198
- _font_to_quadratic(input_path, output_path, **kwargs)
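Note: the following is a hypothetical invocation sketch of the `main()` entry point above; it is not part of the deleted file. The UFO paths and the output directory are placeholders, and either `ufoLib2` or `defcon` must be installed for the conversion to run.

```python
from fontTools.cu2qu.cli import main  # the entry point shown in the diff above

# Convert two hypothetical master UFOs to quadratic curves, keeping them
# point-compatible for interpolation, and write the results to a new directory.
main([
    "MyFont-Regular.ufo",
    "MyFont-Bold.ufo",
    "--interpolatable",
    "--conversion-error", "0.001",
    "-d", "quadratic_ufos",
])
```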
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/smb.py DELETED
@@ -1,309 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- This module contains SMBFileSystem class responsible for handling access to
4
- Windows Samba network shares by using package smbprotocol
5
- """
6
-
7
- import datetime
8
- import uuid
9
- from stat import S_ISDIR, S_ISLNK
10
-
11
- import smbclient
12
-
13
- from .. import AbstractFileSystem
14
- from ..utils import infer_storage_options
15
-
16
- # ! pylint: disable=bad-continuation
17
-
18
-
19
- class SMBFileSystem(AbstractFileSystem):
20
- """Allow reading and writing to Windows and Samba network shares.
21
-
22
- When using `fsspec.open()` for getting a file-like object the URI
23
- should be specified as this format:
24
- ``smb://workgroup;user:password@server:port/share/folder/file.csv``.
25
-
26
- Example::
27
-
28
- >>> import fsspec
29
- >>> with fsspec.open(
30
- ... 'smb://myuser:[email protected]/' 'share/folder/file.csv'
31
- ... ) as smbfile:
32
- ... df = pd.read_csv(smbfile, sep='|', header=None)
33
-
34
- Note that you need to pass in a valid hostname or IP address for the host
35
- component of the URL. Do not use the Windows/NetBIOS machine name for the
36
- host component.
37
-
38
- The first component of the path in the URL points to the name of the shared
39
- folder. Subsequent path components will point to the directory/folder/file.
40
-
41
- The URL components ``workgroup`` , ``user``, ``password`` and ``port`` may be
42
- optional.
43
-
44
- .. note::
45
-
46
- Using this filesystem requires the `smbprotocol`_ package to be installed, e.g.::
47
-
48
- $ pip install smbprotocol
49
- # or
50
- # pip install smbprotocol[kerberos]
51
-
52
- .. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements
53
-
54
- Note: if using this with the ``open`` or ``open_files``, with full URLs,
55
- there is no way to tell if a path is relative, so all paths are assumed
56
- to be absolute.
57
- """
58
-
59
- protocol = "smb"
60
-
61
- # pylint: disable=too-many-arguments
62
- def __init__(
63
- self,
64
- host,
65
- port=None,
66
- username=None,
67
- password=None,
68
- timeout=60,
69
- encrypt=None,
70
- share_access=None,
71
- **kwargs,
72
- ):
73
- """
74
- You can use _get_kwargs_from_urls to get some kwargs from
75
- a reasonable SMB url.
76
-
77
- Authentication will be anonymous or integrated if username/password are not
78
- given.
79
-
80
- Parameters
81
- ----------
82
- host: str
83
- The remote server name/ip to connect to
84
- port: int
85
- Port to connect with. Usually 445, sometimes 139.
86
- username: str or None
87
- Username to connect with. Required if Kerberos auth is not being used.
88
- password: str or None
89
- User's password on the server, if using username
90
- timeout: int
91
- Connection timeout in seconds
92
- encrypt: bool
93
- Whether to force encryption or not, once this has been set to True
94
- the session cannot be changed back to False.
95
- share_access: str or None
96
- Specifies the default access applied to file open operations
97
- performed with this file system object.
98
- This affects whether other processes can concurrently open a handle
99
- to the same file.
100
-
101
- - None (the default): exclusively locks the file until closed.
102
- - 'r': Allow other handles to be opened with read access.
103
- - 'w': Allow other handles to be opened with write access.
104
- - 'd': Allow other handles to be opened with delete access.
105
- """
106
- super(SMBFileSystem, self).__init__(**kwargs)
107
- self.host = host
108
- self.port = port
109
- self.username = username
110
- self.password = password
111
- self.timeout = timeout
112
- self.encrypt = encrypt
113
- self.temppath = kwargs.pop("temppath", "")
114
- self.share_access = share_access
115
- self._connect()
116
-
117
- def _connect(self):
118
- smbclient.register_session(
119
- self.host,
120
- username=self.username,
121
- password=self.password,
122
- port=445 if self.port is None else self.port,
123
- encrypt=self.encrypt,
124
- connection_timeout=self.timeout,
125
- )
126
-
127
- @classmethod
128
- def _strip_protocol(cls, path):
129
- return infer_storage_options(path)["path"]
130
-
131
- @staticmethod
132
- def _get_kwargs_from_urls(path):
133
- # smb://workgroup;user:password@host:port/share/folder/file.csv
134
- out = infer_storage_options(path)
135
- out.pop("path", None)
136
- out.pop("protocol", None)
137
- return out
138
-
139
- def mkdir(self, path, create_parents=True, **kwargs):
140
- wpath = _as_unc_path(self.host, path)
141
- if create_parents:
142
- smbclient.makedirs(wpath, exist_ok=False, **kwargs)
143
- else:
144
- smbclient.mkdir(wpath, **kwargs)
145
-
146
- def makedirs(self, path, exist_ok=False):
147
- if _share_has_path(path):
148
- wpath = _as_unc_path(self.host, path)
149
- smbclient.makedirs(wpath, exist_ok=exist_ok)
150
-
151
- def rmdir(self, path):
152
- if _share_has_path(path):
153
- wpath = _as_unc_path(self.host, path)
154
- smbclient.rmdir(wpath)
155
-
156
- def info(self, path, **kwargs):
157
- wpath = _as_unc_path(self.host, path)
158
- stats = smbclient.stat(wpath, **kwargs)
159
- if S_ISDIR(stats.st_mode):
160
- stype = "directory"
161
- elif S_ISLNK(stats.st_mode):
162
- stype = "link"
163
- else:
164
- stype = "file"
165
- res = {
166
- "name": path + "/" if stype == "directory" else path,
167
- "size": stats.st_size,
168
- "type": stype,
169
- "uid": stats.st_uid,
170
- "gid": stats.st_gid,
171
- "time": stats.st_atime,
172
- "mtime": stats.st_mtime,
173
- }
174
- return res
175
-
176
- def created(self, path):
177
- """Return the created timestamp of a file as a datetime.datetime"""
178
- wpath = _as_unc_path(self.host, path)
179
- stats = smbclient.stat(wpath)
180
- return datetime.datetime.utcfromtimestamp(stats.st_ctime)
181
-
182
- def modified(self, path):
183
- """Return the modified timestamp of a file as a datetime.datetime"""
184
- wpath = _as_unc_path(self.host, path)
185
- stats = smbclient.stat(wpath)
186
- return datetime.datetime.utcfromtimestamp(stats.st_mtime)
187
-
188
- def ls(self, path, detail=True, **kwargs):
189
- unc = _as_unc_path(self.host, path)
190
- listed = smbclient.listdir(unc, **kwargs)
191
- dirs = ["/".join([path.rstrip("/"), p]) for p in listed]
192
- if detail:
193
- dirs = [self.info(d) for d in dirs]
194
- return dirs
195
-
196
- # pylint: disable=too-many-arguments
197
- def _open(
198
- self,
199
- path,
200
- mode="rb",
201
- block_size=-1,
202
- autocommit=True,
203
- cache_options=None,
204
- **kwargs,
205
- ):
206
- """
207
- block_size: int or None
208
- If 0, no buffering, 1, line buffering, >1, buffer that many bytes
209
-
210
- Notes
211
- -----
212
- By specifying 'share_access' in 'kwargs' it is possible to override the
213
- default shared access setting applied in the constructor of this object.
214
- """
215
- bls = block_size if block_size is not None and block_size >= 0 else -1
216
- wpath = _as_unc_path(self.host, path)
217
- share_access = kwargs.pop("share_access", self.share_access)
218
- if "w" in mode and autocommit is False:
219
- temp = _as_temp_path(self.host, path, self.temppath)
220
- return SMBFileOpener(wpath, temp, mode, block_size=bls, **kwargs)
221
- return smbclient.open_file(
222
- wpath, mode, buffering=bls, share_access=share_access, **kwargs
223
- )
224
-
225
- def copy(self, path1, path2, **kwargs):
226
- """Copy within two locations in the same filesystem"""
227
- wpath1 = _as_unc_path(self.host, path1)
228
- wpath2 = _as_unc_path(self.host, path2)
229
- smbclient.copyfile(wpath1, wpath2, **kwargs)
230
-
231
- def _rm(self, path):
232
- if _share_has_path(path):
233
- wpath = _as_unc_path(self.host, path)
234
- stats = smbclient.stat(wpath)
235
- if S_ISDIR(stats.st_mode):
236
- smbclient.rmdir(wpath)
237
- else:
238
- smbclient.remove(wpath)
239
-
240
- def mv(self, path1, path2, **kwargs):
241
- wpath1 = _as_unc_path(self.host, path1)
242
- wpath2 = _as_unc_path(self.host, path2)
243
- smbclient.rename(wpath1, wpath2, **kwargs)
244
-
245
-
246
- def _as_unc_path(host, path):
247
- rpath = path.replace("/", "\\")
248
- unc = "\\\\{}{}".format(host, rpath)
249
- return unc
250
-
251
-
252
- def _as_temp_path(host, path, temppath):
253
- share = path.split("/")[1]
254
- temp_file = "/{}{}/{}".format(share, temppath, uuid.uuid4())
255
- unc = _as_unc_path(host, temp_file)
256
- return unc
257
-
258
-
259
- def _share_has_path(path):
260
- parts = path.count("/")
261
- if path.endswith("/"):
262
- return parts > 2
263
- return parts > 1
264
-
265
-
266
- class SMBFileOpener(object):
267
- """writes to remote temporary file, move on commit"""
268
-
269
- def __init__(self, path, temp, mode, block_size=-1, **kwargs):
270
- self.path = path
271
- self.temp = temp
272
- self.mode = mode
273
- self.block_size = block_size
274
- self.kwargs = kwargs
275
- self.smbfile = None
276
- self._incontext = False
277
- self._open()
278
-
279
- def _open(self):
280
- if self.smbfile is None or self.smbfile.closed:
281
- self.smbfile = smbclient.open_file(
282
- self.temp, self.mode, buffering=self.block_size, **self.kwargs
283
- )
284
-
285
- def commit(self):
286
- """Move temp file to definitive on success."""
287
- # TODO: use transaction support in SMB protocol
288
- smbclient.replace(self.temp, self.path)
289
-
290
- def discard(self):
291
- """Remove the temp file on failure."""
292
- smbclient.remove(self.temp)
293
-
294
- def __fspath__(self):
295
- return self.path
296
-
297
- def __iter__(self):
298
- return self.smbfile.__iter__()
299
-
300
- def __getattr__(self, item):
301
- return getattr(self.smbfile, item)
302
-
303
- def __enter__(self):
304
- self._incontext = True
305
- return self.smbfile.__enter__()
306
-
307
- def __exit__(self, exc_type, exc_value, traceback):
308
- self._incontext = False
309
- self.smbfile.__exit__(exc_type, exc_value, traceback)
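Note: the `SMBFileSystem` docstring above already shows the URL form with `fsspec.open`; the sketch below is not part of the deleted file and shows the equivalent direct instantiation through `fsspec.filesystem`. The host, credentials, and paths are placeholders, and `smbprotocol` plus a reachable share are assumed.

```python
import fsspec

# Instantiate the "smb" filesystem directly; all connection values are placeholders.
fs = fsspec.filesystem(
    "smb",
    host="10.0.0.10",
    username="myuser",
    password="mypassword",
)

print(fs.ls("/share/folder"))               # list the shared folder
with fs.open("/share/folder/file.csv", "rb") as f:
    first_line = f.readline()
```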
 
spaces/DaFujaTyping/second-webui-docker/oh-no.py DELETED
@@ -1,14 +0,0 @@
- import gradio as gr
-
- block = gr.Blocks()
-
- def run():
-     with block:
-         gr.Markdown(
-             """
- <p>oh no 😐 something wrong with the 🤗 hugging face servers 😐 hopefully, it will be fixed soon</p>
- """)
-     block.launch(server_name="0.0.0.0", server_port=7860)
-
- if __name__ == "__main__":
-     run()
 
spaces/DeepLabCut/MegaDetector_DeepLabCut/DLC_models/read.md DELETED
@@ -1,3 +0,0 @@
- These models are part of the DeepLabCut Model zoo. For details, model citations, and further information, please see: modelzoo.deeplabcut.org
-
- https://arxiv.org/abs/2203.07436v1 & http://modelzoo.deeplabcut.org
 
spaces/DonDoesStuff/openjourney-v4-demo/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/prompthero/openjourney-v4").launch()
 
spaces/DragGan/DragGan/stylegan_human/openpose/src/util.py DELETED
@@ -1,95 +0,0 @@
- import numpy as np
- import math
- import cv2
- import matplotlib
- from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
- from matplotlib.figure import Figure
- import numpy as np
- import matplotlib.pyplot as plt
- import cv2
-
-
- def padRightDownCorner(img, stride, padValue):
-     h = img.shape[0]
-     w = img.shape[1]
-
-     pad = 4 * [None]
-     pad[0] = 0  # up
-     pad[1] = 0  # left
-     pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
-     pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right
-
-     img_padded = img
-     pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
-     img_padded = np.concatenate((pad_up, img_padded), axis=0)
-     pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
-     img_padded = np.concatenate((pad_left, img_padded), axis=1)
-     pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
-     img_padded = np.concatenate((img_padded, pad_down), axis=0)
-     pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
-     img_padded = np.concatenate((img_padded, pad_right), axis=1)
-
-     return img_padded, pad
-
- # transfer caffe model to pytorch which will match the layer name
- def transfer(model, model_weights):
-     transfered_model_weights = {}
-     for weights_name in model.state_dict().keys():
-         transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
-     return transfered_model_weights
-
- # draw the body keypoints and limbs
- def draw_bodypose(canvas, candidate, subset, show_number=False):
-     stickwidth = 4
-     limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
-                [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
-                [1, 16], [16, 18], [3, 17], [6, 18]]
-
-     colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
-               [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
-               [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
-     for i in range(18):
-         for n in range(len(subset)):
-             index = int(subset[n][i])
-             if index == -1:
-                 continue
-             x, y = candidate[index][0:2]
-             cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
-             if show_number:
-                 cv2.putText(canvas, f'{index}', (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
-                             (255, 255, 0), 1, cv2.LINE_AA)
-     # draw the limbs between connected keypoints
-     for i in range(17):
-         for n in range(len(subset)):
-             index = subset[n][np.array(limbSeq[i]) - 1]
-             if -1 in index:
-                 continue
-             cur_canvas = canvas.copy()
-             Y = candidate[index.astype(int), 0]
-             X = candidate[index.astype(int), 1]
-             mX = np.mean(X)
-             mY = np.mean(Y)
-             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
-             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
-             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
-             cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
-             canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
-
-     return canvas
-
- # get max index of 2d array
- def npmax(array):
-     arrayindex = array.argmax(1)
-     arrayvalue = array.max(1)
-     i = arrayvalue.argmax()
-     j = arrayindex[i]
-     return i, j
-
- # get max index of 2d array
- def npmax_with_score(array):
-     arrayindex = array.argmax(1)
-     arrayvalue = array.max(1)
-     i = arrayvalue.argmax()
-     j = arrayindex[i]
-     score = array[i][j]
-     return i, j, score
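Note: the sketch below is not part of the deleted file; it illustrates how `padRightDownCorner` and `draw_bodypose` above are typically used, assuming they are importable (e.g. as `src.util`). The image path and the single-keypoint `candidate`/`subset` arrays are dummy placeholders standing in for real detector output.

```python
import cv2
import numpy as np

from src.util import padRightDownCorner, draw_bodypose  # helpers from the diff above

img = cv2.imread("person.jpg")              # placeholder path
if img is None:
    raise FileNotFoundError("person.jpg")

# Pad so height/width become multiples of the network stride.
padded, pad = padRightDownCorner(img, stride=8, padValue=128)
print(padded.shape, pad)

# Dummy detections: one keypoint (index 0, the "nose") for one person.
candidate = np.array([[100.0, 120.0, 0.9, 0]])   # x, y, score, id
subset = -1 * np.ones((1, 20))
subset[0, 0] = 0                                 # person 0 uses candidate row 0
canvas = draw_bodypose(img.copy(), candidate, subset)
cv2.imwrite("pose_vis.jpg", canvas)
```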