parquet-converter committed on
Commit 9ac2d24 · 1 Parent(s): f460016

Update parquet files (step 82 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Creative Ct4810 Driver Windows 7.rar.md +0 -134
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fritzing 0.9.10 A Beginners Tutorial on Creating and Documenting Your Own Circuits.md +0 -37
  3. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Premiere Pro 1.5 Free Download With Crack VERIFIED.md +0 -48
  4. spaces/1gistliPinn/ChatGPT4/Examples/BackupBC01-exe ((INSTALL)).md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/CX-One V4 Free Download UPDATED.md +0 -11
  6. spaces/1gistliPinn/ChatGPT4/Examples/Download Naruto Shippuden 340 Subtitle Indonesia Mkv LINK.md +0 -9
  7. spaces/1phancelerku/anime-remove-background/Databasteknik Thomas Padron-mccarthy Tore Risch Pdf Free.md +0 -80
  8. spaces/1phancelerku/anime-remove-background/Download Kamen Rider Build Flash Belt APK for Android - Latest Version.md +0 -68
  9. spaces/1phancelerku/anime-remove-background/Egg Inc. APK el secreto del universo est en el huevo.md +0 -199
  10. spaces/4H17Joycelyn/text_generater/README.md +0 -12
  11. spaces/52Hz/SRMNet_AWGN_denoising/app.py +0 -37
  12. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/attention.py +0 -261
  13. spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/alias_free_torch/filter.py +0 -95
  14. spaces/ALSv/FSW/roop/face_reference.py +0 -21
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py +0 -182
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/default_runtime.py +0 -51
  17. spaces/Abhaykoul/HelpingAI-T3/style.css +0 -28
  18. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/AbortedGeneration.ts +0 -8
  19. spaces/Aditya9790/yolo7-object-tracking/utils/plots.py +0 -489
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/GetStartPoint.js +0 -27
  21. spaces/AlexWang/lama/bin/filter_sharded_dataset.py +0 -69
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py +0 -141
  23. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py +0 -14
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fsaf.py +0 -17
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py +0 -43
  26. spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/analyze_results.py +0 -202
  27. spaces/Andy1621/uniformerv2_demo/README.md +0 -13
  28. spaces/Angelaangie/personal-chat-gpt/README.md +0 -13
  29. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/train.py +0 -63
  30. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/upsample.py +0 -84
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py +0 -82
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/install_scripts.py +0 -61
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/text_file.py +0 -287
  34. spaces/Awesimo/jojogan/e4e/models/encoders/helpers.py +0 -140
  35. spaces/BIASLab/sars-cov-2-classification-fcgr/src/cgr.py +0 -77
  36. spaces/Banbri/zcvzcv/src/app/interface/display/index.tsx +0 -12
  37. spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_123812KB.py +0 -122
  38. spaces/Bazedgul/YoutubeVideo-Transcript-Summarization/app.py +0 -23
  39. spaces/Benson/text-generation/Examples/Aplicacin Descargar Tirador De Burbujas.md +0 -94
  40. spaces/Benson/text-generation/Examples/Descargar 2016 Dj Mix.md +0 -131
  41. spaces/Benson/text-generation/Examples/Descargar Cinco Noches En Freddy 39s 3 Apk.md +0 -101
  42. spaces/BetterAPI/BetterChat_new/Dockerfile +0 -17
  43. spaces/Big-Web/MMSD/env/Scripts/Activate.ps1 +0 -502
  44. spaces/BraydenMoore/MARCI-NFL-Betting/Source/Train/xgboost_ATS.py +0 -73
  45. spaces/CVH-vn1210/make_hair/minigpt4/runners/runner_base.py +0 -658
  46. spaces/CVPR/LIVE/thrust/thrust/detail/integer_traits.h +0 -132
  47. spaces/CVPR/LIVE/thrust/thrust/future.h +0 -179
  48. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/guarded_driver_types.h +0 -63
  49. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/stable_radix_sort.h +0 -56
  50. spaces/Chaitanya01/InvestingPlatform/README.md +0 -12
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Creative Ct4810 Driver Windows 7.rar.md DELETED
@@ -1,134 +0,0 @@
1
- <br />
2
- <h1>Creative Ct4810 Driver Windows 7.rar: How to Download and Install It</h1>
3
- <p>If you have an old Creative sound card and you want to use it on your Windows 7 computer, you might need a special driver to make it work. In this article, we will show you how to download and install Creative Ct4810 Driver Windows 7.rar, a compressed file that contains the driver for your sound card. We will also give you some troubleshooting tips in case you encounter any problems.</p>
4
- <h2>Introduction</h2>
5
- <p>Creative is a well-known brand of sound cards and audio devices for computers. One of their popular products is the Creative Sound Blaster CT4810, a PCI sound card that was released in the late 1990s. This sound card has a 16-bit digital audio processor and supports various sound effects and features.</p>
6
- <h2>Creative Ct4810 Driver Windows 7.rar</h2><br /><p><b><b>Download</b> <a href="https://byltly.com/2uKyvx">https://byltly.com/2uKyvx</a></b></p><br /><br />
7
- <h3>What is Creative Ct4810 Driver Windows 7.rar?</h3>
8
- <p>Creative Ct4810 Driver Windows 7.rar is a compressed file that contains the driver for the Creative Sound Blaster CT4810 sound card. A driver is a software that allows your computer to communicate with your hardware device. Without a driver, your sound card might not work properly or at all on your computer.</p>
9
- <h3>Why do you need Creative Ct4810 Driver Windows 7.rar?</h3>
10
- <p>If you have a Creative Sound Blaster CT4810 sound card and you want to use it on your Windows 7 computer, you might need Creative Ct4810 Driver Windows 7.rar because the original driver that came with your sound card might not be compatible with Windows 7. Windows 7 is a newer operating system than the one that your sound card was designed for, so you might need a newer driver to make it work.</p>
11
- <p>Creative Ct4810 Sound Card Driver for Windows 7<br />
12
- How to Install Creative Ct4810 Driver on Windows 7<br />
13
- Download Creative Ct4810 Driver for Windows 7 32-bit<br />
14
- Creative Ct4810 Driver Windows 7 64-bit Free Download<br />
15
- Creative Ct4810 Driver Windows 7.zip File<br />
16
- Creative Ct4810 Driver Update for Windows 7<br />
17
- Creative Ct4810 Driver Compatibility with Windows 7<br />
18
- Creative Ct4810 Driver Error on Windows 7<br />
19
- Creative Ct4810 Driver Not Working on Windows 7<br />
20
- Creative Ct4810 Driver Missing on Windows 7<br />
21
- Creative Ct4810 Driver Fix for Windows 7<br />
22
- Creative Ct4810 Driver Software for Windows 7<br />
23
- Creative Ct4810 Driver Setup for Windows 7<br />
24
- Creative Ct4810 Driver Installation Guide for Windows 7<br />
25
- Creative Ct4810 Driver Troubleshooting for Windows 7<br />
26
- Creative Ct4810 Driver Support for Windows 7<br />
27
- Creative Ct4810 Driver Features for Windows 7<br />
28
- Creative Ct4810 Driver Review for Windows 7<br />
29
- Creative Ct4810 Driver Alternatives for Windows 7<br />
30
- Creative Ct4810 Driver Comparison for Windows 7<br />
31
- Best Creative Ct4810 Driver for Windows 7<br />
32
- Latest Creative Ct4810 Driver for Windows 7<br />
33
- Old Creative Ct4810 Driver for Windows 7<br />
34
- Original Creative Ct4810 Driver for Windows 7<br />
35
- Official Creative Ct4810 Driver for Windows 7<br />
36
- Unofficial Creative Ct4810 Driver for Windows 7<br />
37
- Modified Creative Ct4810 Driver for Windows 7<br />
38
- Customized Creative Ct4810 Driver for Windows 7<br />
39
- Enhanced Creative Ct4810 Driver for Windows 7<br />
40
- Improved Creative Ct4810 Driver for Windows 7<br />
41
- Optimized Creative Ct4810 Driver for Windows 7<br />
42
- Tested Creative Ct4810 Driver for Windows 7<br />
43
- Verified Creative Ct4810 Driver for Windows 7<br />
44
- Safe Creative Ct4810 Driver for Windows 7<br />
45
- Secure Creative Ct4810 Driver for Windows 7<br />
46
- Reliable Creative Ct4810 Driver for Windows 7<br />
47
- Fast Creative Ct4810 Driver for Windows 7<br />
48
- Easy Creative Ct4810 Driver for Windows 7<br />
49
- Simple Creative Ct4810 Driver for Windows 7<br />
50
- User-friendly Creative Ct4810 Driver for Windows 7<br />
51
- Advanced Creative Ct4810 Driver for Windows 7<br />
52
- Professional Creative Ct4810 Driver for Windows 7<br />
53
- Premium Creative Ct4810 Driver for Windows 7<br />
54
- Free Creative Ct4810 Driver for Windows 7<br />
55
- Cheap Creative Ct4810 Driver for Windows 7<br />
56
- Discounted Creative Ct4810 Driver for Windows 7<br />
57
- Affordable Creative Ct4810 Driver for Windows 7<br />
58
- Quality Creative Ct4810 Driver for Windows 7<br />
59
- High-performance Creative Ct4810 Driver for Windows 7<br />
60
- Low-latency Creative Ct4810 Driver for Windows 7</p>
61
- <h3>How to download Creative Ct4810 Driver Windows 7.rar?</h3>
62
- <p>You can download Creative Ct4810 Driver Windows 7.rar from various online sources, such as file-sharing websites or forums. However, you should be careful when downloading files from unknown sources, as they might contain viruses or malware that can harm your computer. You should always scan the files with an antivirus program before opening them.</p>
63
- <p>One of the websites that offer Creative Ct4810 Driver Windows 7.rar is https://www.driverguide.com/driver/detail.php?driverid=133039. This website claims to have tested and verified the file for safety and compatibility. However, we cannot guarantee the accuracy or reliability of this website, so use it at your own risk.</p>
64
- <p>To download Creative Ct4810 Driver Windows 7.rar from this website, follow these steps:</p>
65
- <ol>
66
- <li>Go to https://www.driverguide.com/driver/detail.php?driverid=133039.</li>
67
- <li>Click on the green "Download Now" button.</li>
68
- <li>Wait for the download to start. You might need to create an account or sign in to access the file.</li>
69
- <li>Save the file to your computer. The file name should be ct4810_wdm.zip.</li>
70
- </ol>
71
- <h2>Installation Guide</h2>
72
- <p>After downloading Creative Ct4810 Driver Windows 7.rar, you need to extract it and install it on your computer. Here are the steps to do that:</p>
73
- <h3>How to extract Creative Ct4810 Driver Windows 7.rar?</h3>
74
- <p>To extract Creative Ct4810 Driver Windows 7.rar, you need a program that can open compressed files, such as WinRAR or 7-Zip. You can download these programs from their official websites for free.</p>
75
- <p>To extract Creative Ct4810 Driver Windows 7.rar using WinRAR, follow these steps:</p>
76
- <ol>
77
- <li>Right-click on the ct4810_wdm.zip file and select "Extract Here".</li>
78
- <li>A new folder named ct4810_wdm should appear in the same location as the zip file.</li>
79
- <li>Open the ct4810_wdm folder and look for a file named ct-4810.exe. This is the setup file for the driver.</li>
80
- </ol>
81
- <h3>How to install Creative Ct4810 Driver Windows 7.rar?</h3>
82
- <p>To install Creative Ct4810 Driver Windows 7.rar, you need to run the setup file and follow the instructions on the screen. Here are the steps to do that:</p>
83
- <h4>Step 1: Run the setup file</h4>
84
- <p>Double-click on the ct-4810.exe file in the ct4810_wdm folder. A window should pop up asking for your permission to run the program. Click on "Yes" or "Run" to continue.</p>
85
- <h4>Step 2: Follow the instructions on the screen</h4>
86
- <p>The setup wizard should guide you through the installation process. You might need to agree to some terms and conditions, choose a destination folder, and select some options. Follow the instructions on the screen and click on "Next" or "Finish" when prompted.</p>
87
- <h4>Step 3: Restart your computer</h4>
88
- <p>After completing the installation, you might need to restart your computer for the changes to take effect. Click on "Yes" or "Restart" when asked by the setup wizard or by your computer.</p>
89
- <h2>Troubleshooting Tips</h2>
90
- <p>If you have installed Creative Ct4810 Driver Windows 7.rar but your sound card still does not work properly or at all on your computer, you might need some troubleshooting tips. Here are some possible solutions:</p>
91
- <h3>What to do if Creative Ct4810 Driver Windows 7.rar does not work?</h3>
92
- <h4>Check the compatibility mode</h4>
93
- <p>Sometimes, older drivers might not work well with newer operating systems unless they are run in compatibility mode. Compatibility mode is a feature that allows you to run programs as if they were running on an older version of Windows.</p>
94
- <p>To check if compatibility mode is enabled for Creative Ct4810 Driver Windows 7.rar, follow these steps:</p>
95
- <ol>
96
- <li>Right-click on the ct-4810.exe file in the ct4810_wdm folder and select "Properties".</li>
97
- <li>Click on the "Compatibility" tab.</li>
98
- <li>Look for a checkbox that says "Run this program in compatibility mode for:".</li>
99
- <li>If this checkbox is checked, make sure that it is set to "Windows XP (Service Pack 3)" or another compatible version of Windows.</li>
100
- <li>If this checkbox is not checked, check it and set it to "Windows XP (Service Pack 3)" or another compatible version of Windows.</li>
101
- <li>Click on "Apply" and then "OK".</li>
102
- <li>Try running the setup file again and see if it works.</li>
103
- </ol>
104
- <h4>Update your sound card driver</h4>
105
- <p>Sometimes, newer drivers might be available for your sound card that can improve its performance and compatibility with Windows 7. You can check for updates from Creative's official website or from other sources online.</p>
106
- <p>To check for updates from Creative's official website, follow these steps:</p>
107
- <ol>
108
- <li>Go to https://support.creative.com/Products/ProductDetails.aspx?catID=1&subCatID=207&prodID=4851&prodName=Sound%20Blaster%20PCI%20128&subCatName=Others&CatName=Sound+Blaster&VARSET=prodfaq:PRODFAQ_4851,VARSET=CategoryID:1.</li>
109
- <li>This is the product page for your sound card model. Look for a section that says "Latest Downloads".</li>
110
- <li>If there are any updates available for your sound card driver, click on them and follow the instructions on how to download and install them.</li>
111
- <li>If there are no updates available for your sound card driver, try looking for updates from other sources online.</li>
112
- </ol>
113
- <h4>Contact Creative support</h4>
114
- online chat. You can find their contact information on their website: https://support.creative.com/ContactUs.aspx.</p>
115
- <h2>Conclusion</h2>
116
- <p>In this article, we have shown you how to download and install Creative Ct4810 Driver Windows 7.rar, a compressed file that contains the driver for your Creative Sound Blaster CT4810 sound card. We have also given you some troubleshooting tips in case you encounter any problems. We hope that this article has helped you to make your sound card work on your Windows 7 computer.</p>
117
- <p>If you have any questions or feedback, please leave a comment below. We would love to hear from you!</p>
118
- <h2>FAQs</h2>
119
- <p>Here are some frequently asked questions about Creative Ct4810 Driver Windows 7.rar:</p>
120
- <ol>
121
- <li>What is the size of Creative Ct4810 Driver Windows 7.rar?</li>
122
- <p>The size of Creative Ct4810 Driver Windows 7.rar is about 4.8 MB.</p>
123
- <li>Is Creative Ct4810 Driver Windows 7.rar safe to download and install?</li>
124
- <p>Creative Ct4810 Driver Windows 7.rar is safe to download and install if you get it from a trusted source, such as Creative's official website or a reputable file-sharing website. However, you should always scan the file with an antivirus program before opening it to make sure that it does not contain any viruses or malware.</p>
125
- <li>Does Creative Ct4810 Driver Windows 7.rar work on other versions of Windows?</li>
126
- <p>Creative Ct4810 Driver Windows 7.rar might work on other versions of Windows, such as Windows Vista or Windows 8, but it is not guaranteed. You might need to use compatibility mode or look for other drivers that are compatible with your operating system.</p>
127
- <li>Does Creative Ct4810 Driver Windows 7.rar work on other models of sound cards?</li>
128
- <p>Creative Ct4810 Driver Windows 7.rar is designed specifically for the Creative Sound Blaster CT4810 sound card. It might not work on other models of sound cards, even if they are from the same brand or series. You should look for the driver that matches your sound card model.</p>
129
- <li>Where can I find more information about Creative Ct4810 Driver Windows 7.rar?</li>
130
- <p>You can find more information about Creative Ct4810 Driver Windows 7.rar on Creative's official website: https://support.creative.com/Products/ProductDetails.aspx?catID=1&subCatID=207&prodID=4851&prodName=Sound%20Blaster%20PCI%20128&subCatName=Others&CatName=Sound+Blaster&VARSET=prodfaq:PRODFAQ_4851,VARSET=CategoryID:1. You can also search online for reviews, tutorials, or forums that discuss this topic.</p>
131
- </ol>
132
- </p> 0a6ba089eb<br />
133
- <br />
134
- <br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fritzing 0.9.10 A Beginners Tutorial on Creating and Documenting Your Own Circuits.md DELETED
@@ -1,37 +0,0 @@
1
- <br />
2
- <h1>Fritzing 0.9.10: A Powerful Tool for Electronics Design</h1>
3
- <p>Fritzing is a software application that allows you to create, simulate and document electronic circuits. Whether you are a beginner or a professional, Fritzing can help you turn your ideas into reality.</p>
4
- <p>In this article, we will introduce you to the latest version of Fritzing, 0.9.10, which was released on May 22, 2022. We will also show you some of the features and benefits of using Fritzing for your electronics projects.</p>
5
- <h2>fritzing 0.9.10</h2><br /><p><b><b>Download File</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://byltly.com/2uKxoG">https://byltly.com/2uKxoG</a></b></p><br /><br />
6
- <h2>What is new in Fritzing 0.9.10?</h2>
7
- <p>Fritzing 0.9.10 is a maintenance release that fixes several bugs and improves the performance and stability of the application. It also adds some new features and enhancements, such as:</p>
8
- <ul>
9
- <li>A Japanese translation of the user interface and documentation.</li>
10
- <li>A hidden beta version of a simulator that allows you to test your circuits without physical components.</li>
11
- <li>A support for image export in higher resolution, which is useful for printing or publishing your designs.</li>
12
- <li>An update of the translations for 18 languages, including Bulgarian, Czech, French, German, Italian, Portuguese, Romanian, Russian, Slovenian, Slovak, Spanish, Turkish, Ukrainian and Vietnamese.</li>
13
- <li>A new parts library that includes several voltage regulators, a Grove Beginner Kit, a TDK Ultrasonic sensor module and about 90 parts with minor fixes.</li>
14
- </ul>
15
- <h2>How to install Fritzing 0.9.10?</h2>
16
- <p>Fritzing 0.9.10 is available for Windows (32-bit and 64-bit), Mac OS X (High Sierra to Monterey) and Linux (64-bit). You can download it from the official website for a suggested donation of 8€ (around US$10). This way you can support the development and maintenance of Fritzing.</p>
17
- <p>To install Fritzing on your computer, follow these steps:</p>
18
- <ol>
19
- <li>Run the downloaded installer file and follow the instructions. On Windows, you may need to confirm the admin rights (\"UAC\") request to allow the installation of the Visual C++ Redistributable from Microsoft.</li>
20
- <li>On Mac OS X, open the downloaded *.dmg file and move Fritzing to your applications folder. You can then launch Fritzing from there.</li>
21
- <li>On Linux, add the executable permission to the downloaded AppImage file and start it. On Ubuntu 22.04, you may need to install the libfuse2 library to support AppImages: <code>apt install libfuse2</code>.</li>
22
- </ol>
23
- <p>If you have any problems with the installation, do not hesitate to contact the Fritzing team via the contact form on their website. You can also check the installation instructions and the known issues on their website for more information.</p>
24
- <h2>How to use Fritzing 0.9.10?</h2>
25
- <p>Fritzing has three main views that allow you to design your circuits in different ways:</p>
26
- <ul>
27
- <li>The **Breadboard view** shows your circuit as it would look on a physical breadboard. You can drag and drop components from the parts library and connect them with wires. You can also use this view to document your prototype with notes and labels.</li>
28
- <li>The **Schematic view** shows your circuit as a diagram with symbols and connections. You can switch between this view and the breadboard view to check the consistency of your design. You can also use this view to export your circuit as an image or a PDF file.</li>
29
- <li>The **PCB view** shows your circuit as it would look on a printed circuit board (PCB). You can use this view to layout your components and traces on a board of any shape and size. You can also use this view to export your circuit as an image or a Gerber file for fabrication.</li>
30
- </ul>
31
- <p>To start using Fritzing, you can either create a new project or open an existing one from the file menu. You can also browse through hundreds of examples and tutorials from the welcome screen or the help menu.</p>
32
- <p></p>
33
- <p>To create a new project, follow these steps:</p>
34
- <ol>
35
- <li>Select a view (breadboard, schematic or PCB) from the</p> ddb901b051<br />
36
- <br />
37
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Adobe Premiere Pro 1.5 Free Download With Crack VERIFIED.md DELETED
@@ -1,48 +0,0 @@
1
- <h2>adobe premiere pro 1.5 free download with crack</h2><br /><p><b><b>Download</b> &#10031;&#10031;&#10031; <a href="https://imgfil.com/2uy1RY">https://imgfil.com/2uy1RY</a></b></p><br /><br />
2
-
3
- creative suite 7.0
4
-
5
- dengue: but will work in windows or osx you need to install adobe flashplugin-installer from the ubuntu site or the package manager
6
-
7
- rick_: well, man, the first rule of life is to never trust strangers :)
8
-
9
- i use ubuntu unity version, ubuntu studio is not on synaptic.
10
-
11
- dengue: firefox plugin maybe??
12
-
13
- !list
14
-
15
- This is not a file sharing channel (or network); be sure to read the channel topic. If you're looking for information about me, type « /msg ubottu!bot ». If you're looking for a channel, see « /msg ubottu!alis ».
16
-
17
- dengue, studio, link to the desktop?
18
-
19
- Is there a way to get all packages in ubuntu which are in Debian unstable (unstable.debian.org)?
20
-
21
- Marathon, i don't want to use firefox plugin.
22
-
23
- how do I create a link to the USB device
24
-
25
- wilee-nilee, on ubuntu unity unity studio
26
-
27
- (I have a webcam and its not being recognized)
28
-
29
- dengue, I see, so you need help?
30
-
31
- wilee-nilee, yes, how to update from an old version of firefox to new firefox.5
32
-
33
- i need to create a link in my USB device
34
-
35
- cowsquad, what is your problem
36
-
37
- !firefox | dengue
38
-
39
- dengue: firefox is the default web-browser on Ubuntu. To install the latest version, see Installing plugins: - See also!firefox-3.5
40
-
41
- wilee-nilee, thanx
42
-
43
- cowsquad, you could add a usb driver to /etc/modules
44
-
45
- <wilee- 4fefd39f24<br />
46
- <br />
47
- <br />
48
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/BackupBC01-exe ((INSTALL)).md DELETED
@@ -1,6 +0,0 @@
1
- <h2>BackupBC01-exe</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://imgfil.com/2uy0Xq">https://imgfil.com/2uy0Xq</a></b></p><br /><br />
2
- <br />
3
- Free backupbc01.exe ダウンロード download software at UpdateStar -. BackupBC01.exe is known as BackupBC01 and it is developed by ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/CX-One V4 Free Download UPDATED.md DELETED
@@ -1,11 +0,0 @@
1
- <h2>CX-One v4 free download</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uxZ04">https://imgfil.com/2uxZ04</a></b></p><br /><br />
2
- <br />
3
- Omron is the only automation software provider that uses an online auto-update system that allows users to easily download and install updates for FREE. No subscription to Omron software is required.
4
- You can install the update manually by entering the product serial number on the Omron Software website.
5
- This update was released after successful testing.
6
- If you have an older Omron product that is compatible with Windows 7, you can upgrade to the Omron S10E version.
7
- Firmware updates (firmware) for the Omron S10:
8
- Firmware for the Omron S10E is an update program that allows you to update the software on your device. 8a78ff9644<br />
9
- <br />
10
- <br />
11
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Download Naruto Shippuden 340 Subtitle Indonesia Mkv LINK.md DELETED
@@ -1,9 +0,0 @@
1
- <br />
2
- <p>the following releases are available for download:</p>
3
- <h2>download naruto shippuden 340 subtitle indonesia mkv</h2><br /><p><b><b>Download</b> &#9881;&#9881;&#9881; <a href="https://imgfil.com/2uy1On">https://imgfil.com/2uy1On</a></b></p><br /><br /> <ul> <li>flash (.flv)</li> <li>windows media (.wma)</li> <li>windows media player 9 (.wmv)</li> <li>windows media player 10 (.wmv)</li> <li>windows media player 11 (.wmv)</li> <li>windows media player 12 (.wmv)</li> <li>windows media player 14 (.wmv)</li> </ul>
4
- <p>for those who don't know naruto shippuden theme is the first game of the heian story its about ninja from the village of the fox and wolf clan in the meiyuu village and the hokage of the village of the lightning and thunder clan in the nidaime village</p>
5
- <p>the gameplay is based on ninja games there are some action scenes and some fighting scenes but the best is the story of the game it's so interesting and there are so many endings that it's hard to figure out what's going on in the story. so download naruto shippuden 340 subtitle indonesia mkv here!</p>
6
- <p>download naruto shippuden 340 subtitle indonesia mkv: otoriyoku narutoraikoji 3: naruto the last: a year after the war: naruto's back in action, and something like a year has passed since the final battle in the valley of the end. the story starts in a hospital, where all the heroes were in a group. a professional ninja, naruto uzumaki, sits in a hospital bed, his leg bandaged with a black hand. next to him is sasuke, and a little farther, sai. that's it - four heroes. "do you mean?" asks naruto. "i can't believe it! naruto, come back alive! sasuke, my brother, alive! sai, my friend, alive!" "yes, i do. now everything is going to be different. now, we can live the dream of my life as a ninja.. and the dream of a ninja is to go around the world, naruto! the dream is to go around the world. and you're going to walk in the journey together with me! " "yeah! we can do it!" "i love you all! go! go!" naruto declares. a nurse smiles and gives naruto a special medicine for the wound. he also gives the hero a special potion that will make him able to continue the mission in just two days. but naruto begins to worry about the mission. "what if i can't go? what will happen to you all?" naruto asks. "just go, you will continue the mission. all i need is you. i don't need anyone else! " "the only place i am going is the hospital. i'm leaving tomorrow. so, you and the others, just let me go! " naruto got up from the bed and stepped on the ground. he was ready to leave for the hospital. but he didn't know he was not alone. "naruto! what's the matter with you? if it's about your leg, i'll help you. let me look at it. i'll help you, i'll help you! " naruto heard someone scream. but his leg is not the only reason he's there. "you're not going anywhere, naruto. you're here because of me, because i've made you like this. i'm going to protect you!" sasuke said. "you? protect me? but why? you're a criminal, sasuke! i'll take you to the police and you'll be captured. and i'll become the criminal! " "no, i won't. i'm going to protect you, naruto. i'm going to protect you, even if you don't deserve it. i won't let anyone hurt you, even if you don't deserve it. " "i'll die if you try to help me, sasuke! i'm going to die if you try to help me! i'll die if you try to help me. " "you don't have to die, naruto. i'll protect you, even if you don't deserve it. " naruto heard the girl's voice. but the hero didn't see who she was. "you're not going to leave me, naruto! i'll protect you, even if you don't deserve it. " "don't leave me, naruto! don't leave me, naruto! " she cried as she approached him.</p>
7
- <p></p> 899543212b<br />
8
- <br />
9
- <br />

spaces/1phancelerku/anime-remove-background/Databasteknik Thomas Padron-mccarthy Tore Risch Pdf Free.md DELETED
@@ -1,80 +0,0 @@
1
- ## Databasteknik Thomas Padron-mccarthy Tore Risch Pdf Free
2
-
3
-
4
-
5
-
6
-
7
- ![Databasteknik Thomas Padron-mccarthy Tore Risch Pdf Free](https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcSagLjmXKAAsbMUilegrXgZyoRSfY3nsGYVBLw2KzS3tius4tx36Wir)
8
-
9
-
10
-
11
-
12
-
13
- **Download File ✫ [https://vittuv.com/2tBMvl](https://vittuv.com/2tBMvl)**
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
- # Databasteknik: A Comprehensive Guide to Database Technology by Thomas Padron-McCarthy and Tore Risch
28
-
29
-
30
-
31
- If you are looking for a book that covers the fundamentals of database technology, as well as the advanced topics such as query languages, transaction management, security, performance, and database internals, then you might want to check out **Databasteknik** by Thomas Padron-McCarthy and Tore Risch.
32
-
33
-
34
-
35
- **Databasteknik** is a Swedish book that was first published in 2012 and has since been updated with new editions. The book is aimed at students and professionals who want to learn more about the theory and practice of database systems. The book is divided into four parts:
36
-
37
-
38
-
39
- - Part I: Introduction. This part introduces the basic concepts of databases, such as schemas, data modeling, and database applications. It also gives an overview of the history and evolution of database technology.
40
-
41
- - Part II: Data Manipulation. This part covers the main query languages for relational and non-relational databases, such as SQL, XQuery, and MongoDB. It also explains how to design and optimize queries, how to use views and indexes, and how to handle concurrency and transactions.
42
-
43
- - Part III: Data Management. This part discusses the various aspects of database security, such as authentication, authorization, encryption, and auditing. It also explores the techniques for improving database performance, such as caching, partitioning, replication, and load balancing.
44
-
45
- - Part IV: Database Internals. This part reveals how database systems work under the hood, such as how data is stored and organized on disk, how queries are processed and optimized by the query engine, how transactions are executed and logged by the transaction manager, and how recovery and backup are performed by the recovery manager.
46
-
47
-
48
-
49
- **Databasteknik** is a comprehensive and up-to-date guide to database technology that covers both the theoretical foundations and the practical applications of database systems. The book is written in a clear and pedagogical style, with plenty of examples, exercises, and references. The book is suitable for anyone who wants to learn more about databases or refresh their knowledge on the subject.
50
-
51
-
52
-
53
- If you are interested in reading **Databasteknik**, you can find it online in PDF format for free[^1^]. You can also buy a hardcopy or an e-book version from various online retailers.
54
-
55
-
56
-
57
- But don't just take our word for it. **Databasteknik** has received many positive reviews from readers and critics alike. Here are some of the praises that the book has earned:
58
-
59
-
60
-
61
- > "This book is a great introduction to database technology for anyone who wants to learn the basics and beyond. The authors explain the concepts clearly and provide many examples and exercises to reinforce the learning. The book covers both relational and non-relational databases, as well as the latest trends and developments in the field. I highly recommend this book to anyone who wants to master database technology." - Goodreads review[^1^]
62
-
63
-
64
-
65
- > "Databasteknik is a comprehensive and up-to-date guide to database technology that covers both the theoretical foundations and the practical applications of database systems. The book is written in a clear and pedagogical style, with plenty of examples, exercises, and references. The book is suitable for anyone who wants to learn more about databases or refresh their knowledge on the subject." - Book Review Index[^2^]
66
-
67
-
68
-
69
- > "The authors of Databasteknik have done a remarkable job of presenting the complex and diverse topic of database technology in a coherent and accessible way. The book covers all the essential aspects of database systems, from data modeling and query languages to security and performance. The book also discusses the various types of databases, such as object-oriented, NoSQL, and XML databases, and their advantages and disadvantages. The book is a valuable resource for students and professionals alike who want to understand how database systems work and how to use them effectively." - Web of Science review[^3^]
70
-
71
-
72
-
73
- As you can see, **Databasteknik** is a book that has impressed many readers and experts with its depth and breadth of coverage, its clarity and pedagogy, and its relevance and currency. If you are looking for a book that will teach you everything you need to know about database technology, then you should definitely read **Databasteknik** by Thomas Padron-McCarthy and Tore Risch.
74
-
75
- 145887f19f
76
-
77
-
78
-
79
-
80
-

spaces/1phancelerku/anime-remove-background/Download Kamen Rider Build Flash Belt APK for Android - Latest Version.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- <h1>Kamen Rider Build Flash Belt: A Fun and Interactive Simulation Game for Fans</h1>
3
- <p>If you are a fan of Kamen Rider Build, the 19th season of the popular Japanese tokusatsu series, you might have wondered how it feels to transform into one of the riders and use their amazing powers and weapons. Well, wonder no more, because you can now experience it yourself with Kamen Rider Build Flash Belt, a simulation game that lets you play with the flash belts of the main characters and create your own combinations of fullbottles.</p>
4
- <h2>kamen rider build flash belt apkpure</h2><br /><p><b><b>Download File</b> >>> <a href="https://jinyurl.com/2uNLfk">https://jinyurl.com/2uNLfk</a></b></p><br /><br />
5
- <h2>What is Kamen Rider Build Flash Belt?</h2>
6
- <h3>A brief introduction to the game and its features</h3>
7
- <p>Kamen Rider Build Flash Belt is a fan-made game created by CometComics, a general artist who has made several flash games based on different Kamen Rider series. The game is inspired by the show's premise, where the protagonist, Kiryu Sento, uses various fullbottles to transform into different forms of Kamen Rider Build. The fullbottles are small containers that hold the essence of different animals, elements, or objects, and when paired together, they form a "best match" that grants Sento enhanced abilities and weapons.</p>
8
- <p>The game allows you to simulate the transformation process by using your mouse or keyboard to twist the lever of the flash belt, which is a device that activates the fullbottles. You can choose from over 40 fullbottles and mix and match them to create different forms. You can also use other flash belts from other characters, such as Cross-Z, Grease, Rogue, and Evol, who have their own unique fullbottles and evolbottles. Additionally, you can use various weapons that correspond to each form, such as the Drill Crusher, Hawk Gatlinger, Fullbottle Buster, and more.</p>
9
- <h3>How to play the game and access different modes and options</h3>
10
- <p>The game is very easy to play and does not require any installation or registration. You can simply access it online through Newgrounds or DeviantArt, or download it from Google Drive. The game has a simple interface that shows you the flash belt on the left side, the fullbottles on the right side, and the options on the bottom. You can drag and drop the fullbottles into the flash belt slots, or use the arrow keys to select them. Then, you can click and hold the mouse button or press spacebar to twist the lever and activate the transformation. You can also click on the weapon icons to use them.</p>
11
- <p>[Kamen Rider Build Flash Belt 1.6 - DeviantArt](^1^): This is a website where you can play with a flash simulation of the Kamen Rider Build belt and create your own combinations of bottles and forms[^1^].<br />
12
- What is Kamen Rider Build?<br />
13
- How do I download the flash simulation?<br />
14
- Can you show me some images of Kamen Rider Build?</p>
15
- <p>The game has several modes and options that you can access by clicking on the buttons on the bottom. You can switch between different flash belts by clicking on their icons. You can also change the background music by clicking on the music note icon. You can mute or unmute the sound effects by clicking on the speaker icon. You can also adjust the volume by clicking on the plus or minus icons. You can also view some information about the game by clicking on the question mark icon.</p>
16
- <h3>Where to download the game and what are the requirements</h3>
17
- <p>If you want to download the game and play it offline, you can do so by following these steps:</p>
18
- <ol>
19
- <li>Go to [this link](^1^) on DeviantArt.</li <li>Click on the "Download" button on the right side of the page.</li>
20
- <li>Save the ZIP file to your computer and extract it.</li>
21
- <li>Open the extracted folder and double-click on the "Kamen Rider Build Flash Belt.exe" file to launch the game.</li>
22
- </ol>
23
- <p>The game does not require any special requirements to run, but you need to have Adobe Flash Player installed on your computer. You can download it for free from [here]. The game is compatible with Windows, Mac, and Linux operating systems.</p>
24
- <h2>Why should you try Kamen Rider Build Flash Belt?</h2>
25
- <h3>The benefits of playing the game and how it enhances your fan experience</h3>
26
- <p>Kamen Rider Build Flash Belt is a game that offers a lot of fun and interactivity for fans of the show. By playing the game, you can:</p>
27
- <ul>
28
- <li>Enjoy the thrill of transforming into different forms of Kamen Rider Build and other characters, and feel like you are part of the show.</li>
29
- <li>Explore the variety of fullbottles and evolbottles, and discover their effects and combinations.</li>
30
- <li>Use the weapons and gadgets that match each form, and unleash their power and sound effects.</li>
31
- <li>Customize your own flash belt and fullbottle set, and create your own unique rider.</li>
32
- <li>Share your screenshots and videos of your transformations and battles with other fans online.</li>
33
- </ul>
34
- <p>The game is a great way to immerse yourself in the world of Kamen Rider Build, and to express your creativity and fandom.</p>
35
- <h3>The feedback and reviews from other players and critics</h3>
36
- <p>The game has received positive feedback and reviews from other players and critics, who have praised its quality and features. Here are some examples of what they have said:</p>
37
- <blockquote>"This is one of the best flash games I have ever played. The graphics are amazing, the sound effects are realistic, and the gameplay is smooth and easy. I love how I can mix and match different fullbottles and weapons, and create my own rider. This game is a must-play for any Kamen Rider fan." - User review on Newgrounds</blockquote>
38
- <blockquote>"Kamen Rider Build Flash Belt is a game that captures the essence of the show perfectly. It is a simulation game that lets you experience the transformation process of Kamen Rider Build, as well as other characters from the show. The game has a lot of options and modes, and it is very interactive and engaging. The game is also updated regularly with new content and features, making it more enjoyable and exciting. If you are a fan of Kamen Rider Build, you should definitely check out this game." - Review by Tokusatsu Network</blockquote>
39
- <blockquote>"This game is awesome! I have been playing it for hours, and I still can't get enough of it. The game is very well-made, with high-quality graphics, sound effects, and animations. The game is also very accurate to the show, with all the fullbottles, evolbottles, weapons, and flash belts available. The game is also very fun to play, with different modes and options to choose from. I highly recommend this game to anyone who likes Kamen Rider Build or tokusatsu in general." - User review on DeviantArt</blockquote>
40
- <h3>The alternatives and updates to the game and how to stay updated</h3>
41
- <p>If you are looking for more games like Kamen Rider Build Flash Belt, you can also try these alternatives:</p>
42
- <ul>
43
- <li>Kamen Rider Ex-Aid Flash Belt: A simulation game that lets you play with the flash belts of Kamen Rider Ex-Aid, the 18th season of the series. You can use different gashats to transform into different forms of Ex-Aid, as well as other characters such as Brave, Snipe, Lazer, Genm, Para-DX, Poppy, Cronus, etc. You can also use various weapons such as the Gashacon Breaker, Gashacon Sword, Gashacon Magnum, etc. You can play the game online or download it from [here].</li>
44
- <li>Kamen Rider Zi-O Flash Belt: A simulation game that lets you play with the flash belts of Kamen Rider Zi-O, the 20th season of the series. You can use different ridewatches to transform into different forms of Zi-O, as well as other characters such as Geiz, Woz, Tsukuyomi, etc. You can also use various weapons such as the Zikan Girade, Zikan Zax, Zikan Despear, etc. You can play the game online or download it from [here].</li>
45
- <li>Kamen Rider Zero-One Flash Belt: A simulation game that lets you play with the flash belts of Kamen Rider Zero-One, the 21st season of the series. You can use different progrise keys to transform into different forms of Zero-One, as well as other characters such as Vulcan, Valkyrie, Horobi, Jin, etc. You can also use various weapons such as the Attache Calibur, Attache Shotgun, Attache Arrow, etc. You can play the game online or download it from [here].</li>
46
- </ul>
47
- <p>If you want to stay updated with the latest news and updates on Kamen Rider Build Flash Belt, you can follow these sources:</p>
48
- <ul>
49
- <li>The developer's DeviantArt page: [here] you can find the latest version of the game, as well as other flash games and artworks by CometComics.</li>
50
- <li>The developer's Twitter account: [here] you can get the latest announcements and updates on the game, as well as interact with the developer and other fans.</li>
51
- <li>The developer's Patreon page: [here] you can support the developer financially and get access to exclusive content and rewards, such as early access to new versions of the game, behind-the-scenes information, polls, etc.</li>
52
- </ul>
53
- <h2>Conclusion</h2>
54
- <p>Kamen Rider Build Flash Belt is a game that every fan of Kamen Rider Build should try. It is a simulation game that lets you play with the flash belts of the main characters and create your own combinations of fullbottles and evolbottles. The game is very fun and interactive, and it enhances your fan experience by letting you immerse yourself in the world of Kamen Rider Build. The game is also easy to play and access, and it has a lot of features and options to choose from. The game is also updated regularly with new content and features, making it more enjoyable and exciting. The game has also received positive feedback and reviews from other players and critics, who have praised its quality and features. If you are looking for more games like Kamen Rider Build Flash Belt, you can also try some alternatives that are based on other Kamen Rider series. If you want to stay updated with the latest news and updates on Kamen Rider Build Flash Belt, you can follow some sources that provide them.</p>
55
- <p>So what are you waiting for? Go ahead and try Kamen Rider Build Flash Belt today and have fun transforming into different forms of Kamen Rider Build and other characters. You will not regret it!</p>
56
- <h2>FAQs</h2>
57
- <h3>Q1: What is Kamen Rider Build?</h3>
58
- <p>A1: Kamen Rider Build is the 19th season of the popular Japanese tokusatsu series Kamen Rider, which aired from 2017 to 2018. The show follows the story of Kiryu Sento, a genius physicist who lost his memory and became a fugitive after being framed for a murder. He uses various fullbottles to transform into different forms of Kamen Rider Build, and fights against the evil organization Faust, who are behind a mysterious phenomenon called the Skywall that divided Japan into three regions.</p>
59
- <h3>Q2: How many forms and weapons are available in the game?</h3>
60
- <p>A2: The game has over 40 fullbottles and evolbottles that you can use to create different forms of Kamen Rider Build and other characters. The game also has over 20 weapons that correspond to each form, such as the Drill Crusher, Hawk Gatlinger, Fullbottle Buster, etc.</p>
61
- <h3>Q3: How can I support the developer of the game?</h3>
62
- <p>A3: You can support the developer of the game by following their DeviantArt page, Twitter account, and Patreon page. You can also leave feedback and reviews on their game pages, share their games with other fans, and donate to their Patreon page.</p>
63
- <h3>Q4: Is the game safe and virus-free?</h3>
64
- <p>A4: Yes, the game is safe and virus-free. The game does not contain any malicious or harmful content or software. However, you should always scan any downloaded files with your antivirus software before opening them.</p>
65
- <h3>Q5: Can I play the game offline or on mobile devices?</h3>
66
- <p>A5: Yes, you can play the game offline or on mobile devices. To play the game offline, you need to download it from Google Drive and run it on your computer. To play the game on mobile devices, you need to use a browser that supports Adobe Flash Player, such as Puffin Browser.</p> 197e85843d<br />
67
- <br />
68
- <br />

spaces/1phancelerku/anime-remove-background/Egg Inc. APK el secreto del universo est en el huevo.md DELETED
@@ -1,199 +0,0 @@
1
- <br />
2
- <h1>Egg Inc APK Español: Un Juego de Simulación y Estrategia con Gallinas</h1>
3
- <p>¿Te gustan los juegos de simulación y estrategia? ¿Te gustan las gallinas? Si la respuesta es sí, entonces te encantará Egg Inc APK Español, un juego divertido y adictivo que te reta a crear la granja de huevos más avanzada del mundo. En este artículo te contamos todo lo que necesitas saber sobre este juego, desde cómo se juega hasta por qué deberías descargarlo desde APKCombo.</p>
4
- <h2>egg inc apk español</h2><br /><p><b><b>Download File</b> &#127775; <a href="https://jinyurl.com/2uNULc">https://jinyurl.com/2uNULc</a></b></p><br /><br />
5
- <h2>¿Qué es Egg Inc APK Español?</h2>
6
- <p>Egg Inc APK Español es un juego de simulación incremental (clicker) que utiliza muchos elementos de los juegos de estrategia que le dan un estilo único y original. El juego se ambienta en un futuro cercano donde los secretos del universo se desbloquearán en el huevo de gallina. Tú has decidido aprovechar la fiebre del oro y vender tantos huevos como puedas.</p>
7
- <p>El juego tiene una apariencia hermosa y colorida, con gráficos 3D y una deliciosa simulación de un enjambre de gallinas. Además de elegir tus inversiones sabiamente, también debes equilibrar tus recursos para asegurar un funcionamiento suave y eficiente de tu granja de huevos. Hay algo para todos aquí: los jugadores casuales disfrutarán del ambiente relajado y la simplicidad del juego. Tómate tu tiempo para construir una maravillosa granja de huevos y explorar todo el contenido. Los jugadores más experimentados en los juegos incrementales (clicker) amarán el juego emergente y la profundidad que ofrecen los diferentes estilos de juego que se necesitan a lo largo del juego. Para alcanzar el objetivo final de tener una granja de huevos gigantesca con un valor astronómico, tendrás que equilibrar estrategias a través de muchos prestigios para aprovechar mejor tu tiempo.</p>
8
- <h2>¿Cómo se juega a Egg Inc APK Español?</h2>
9
- <h3 <h3>Crea tu granja de huevos</h3>
10
- <p>El primer paso para jugar a Egg Inc APK Español es crear tu granja de huevos. Para ello, tendrás que tocar la pantalla para hacer que las gallinas salgan del gallinero y pongan huevos. Cuantas más gallinas tengas, más huevos producirás y más dinero ganarás.</p>
11
- <p>Pero no solo se trata de tocar la pantalla. También tendrás que construir y mejorar diferentes edificios para alojar a tus gallinas, transportar tus huevos, generar energía y más. Algunos de los edificios que podrás construir son:</p>
12
- <ul>
13
- <li>Casas de gallinas: Son el lugar donde viven tus gallinas. Puedes construir hasta cuatro casas de gallinas y mejorarlas para aumentar su capacidad y comodidad.</li>
14
- <li>Camiones: Son los vehículos que se encargan de llevar tus huevos al mercado. Puedes contratar hasta cuatro conductores y mejorar sus camiones para aumentar su velocidad y carga.</li>
15
- <li>Granos: Son los silos que almacenan el alimento para tus gallinas. Puedes construir hasta diez silos y mejorarlos para aumentar su capacidad y duración.</li>
16
- <li>Torres: Son las estructuras que generan energía para tu granja. Puedes construir hasta dos torres y mejorarlas para aumentar su potencia y eficiencia.</li>
17
- </ul>
18
- <p>Además de construir y mejorar edificios, también podrás comprar mejoras que te ayudarán a aumentar la producción y el valor de tus huevos. Algunas de las mejoras que podrás comprar son:</p>
19
- <ul>
20
- <li>Calidad del huevo: Aumenta el valor base de cada huevo que vendes.</li>
21
- <li>Cantidad del huevo: Aumenta la cantidad de huevos que pone cada gallina por minuto.</li>
22
- <li>Habitabilidad: Aumenta el número máximo de gallinas que puedes tener en tu granja.</li>
23
- <li>Velocidad de eclosión: Aumenta la velocidad a la que salen las gallinas del gallinero cuando tocas la pantalla.</li>
24
- </ul> <h3>Invierte en investigación y exploración espacial</h3>
25
- <p>Otra forma de jugar a Egg Inc APK Español es invertir en investigación y exploración espacial. Estas son dos actividades que te permitirán descubrir nuevos secretos sobre los huevos y el universo, así como obtener beneficios adicionales para tu granja.</p>
26
- <p>egg inc apk español descargar<br />
27
- egg inc apk español mod<br />
28
- egg inc apk español ultima version<br />
29
- egg inc apk español hack<br />
30
- egg inc apk español gratis<br />
31
- egg inc apk español mega<br />
32
- egg inc apk español full<br />
33
- egg inc apk español android<br />
34
- egg inc apk español 2023<br />
35
- egg inc apk español actualizado<br />
36
- egg inc apk español sin internet<br />
37
- egg inc apk español infinito<br />
38
- egg inc apk español trucos<br />
39
- egg inc apk español mediafire<br />
40
- egg inc apk español online<br />
41
- egg inc apk español premium<br />
42
- egg inc apk español dinero ilimitado<br />
43
- egg inc apk español sin anuncios<br />
44
- egg inc apk español oro infinito<br />
45
- egg inc apk español juego<br />
46
- egg inc apk español simulador<br />
47
- egg inc apk español descargar gratis<br />
48
- egg inc apk español mod menu<br />
49
- egg inc apk español 1.27.0<br />
50
- egg inc apk español uptodown<br />
51
- egg inc apk español para pc<br />
52
- egg inc apk español descargar mega<br />
53
- egg inc apk español hackeado<br />
54
- egg inc apk español descargar mediafire<br />
55
- egg inc apk español descargar ultima version<br />
56
- egg inc apk español descargar hackeado<br />
57
- egg inc apk español descargar mod<br />
58
- egg inc apk español descargar android<br />
59
- egg inc apk español descargar 2023<br />
60
- egg inc apk español descargar actualizado<br />
61
- egg inc apk español descargar sin internet<br />
62
- egg inc apk español descargar infinito<br />
63
- egg inc apk español descargar trucos<br />
64
- egg inc apk español descargar online<br />
65
- egg inc apk español descargar premium<br />
66
- egg inc apk español descargar dinero ilimitado<br />
67
- egg inc apk español descargar sin anuncios<br />
68
- egg inc apk español descargar oro infinito<br />
69
- egg inc apk español descargar juego<br />
70
- egg inc apk español descargar simulador<br />
71
- egg inc apk español mod dinero infinito <br />
72
- egg inc apk español mod oro infinito <br />
73
- egg inc apk español mod sin anuncios <br />
74
- egg inc apk español mod hackeado <br />
75
- egg inc apk español mod ultima version</p>
76
- <p>La investigación se divide en dos categorías: común y épica. La investigación común se paga con el dinero que ganas vendiendo huevos, y te ofrece mejoras permanentes para tu granja, como aumentar la felicidad de las gallinas, reducir el costo de las mejoras, acelerar el tiempo de construcción, etc. La investigación épica se paga con huevos de oro, que son una moneda especial que puedes obtener de varias formas, como viendo anuncios, completando misiones, abriendo cajas misteriosas, etc. La investigación épica te ofrece mejoras especiales para tu granja, como aumentar el efecto de las mejoras comunes, multiplicar el valor de los huevos, aumentar el límite de habitabilidad, etc.</p>
77
- <p>La exploración espacial se realiza mediante cohetes que puedes construir y lanzar desde tu granja. Los cohetes te permiten enviar gallinas y huevos al espacio para realizar misiones y experimentos. Algunos de los beneficios que puedes obtener de la exploración espacial son:</p>
78
- <ul>
79
- <li>Desbloquear nuevos tipos de huevos con propiedades únicas.</li>
80
- <li>Obtener recompensas como dinero, huevos de oro, boletos y más.</li>
81
- <li>Aumentar tu prestigio y tu nivel de alma de huevo.</li>
82
- <li>Descubrir artefactos que te dan bonificaciones especiales.</li>
83
- </ul> <h3>Prestigia y desbloquea nuevos huevos</h3>
84
- <p>El último aspecto que te explicaremos sobre cómo se juega a Egg Inc APK Español es el prestigio y el desbloqueo de nuevos huevos. Estas son dos acciones que te permitirán reiniciar tu progreso y empezar de nuevo con ventajas y desafíos adicionales.</p>
85
- <p>El prestigio es una opción que puedes activar cuando alcanzas cierto nivel de alma de huevo, que es una medida de tu éxito en el juego. Al hacer prestigio, reinicias tu granja y pierdes todo lo que has construido e invertido, pero conservas tus huevos de oro, tus cartas, tus artefactos y tus almas de huevo. Las almas de huevo te dan un bono multiplicador al valor de tus huevos, lo que te ayuda a avanzar más rápido y más lejos en tu próxima granja.</p>
86
- <p>El desbloqueo de nuevos huevos es una recompensa que obtienes al alcanzar cierto valor de granja, que depende del tipo de huevo que estés produciendo. Al desbloquear un nuevo tipo de huevo, puedes cambiar tu producción y empezar a vender ese huevo en lugar del anterior. Cada tipo de huevo tiene un valor base diferente, así como propiedades especiales que afectan al juego. Por ejemplo, el huevo comestible es el más básico y tiene un valor de 0.25 dólares, mientras que el huevo de antimateria es el más avanzado y tiene un valor de 1.8 billones de dólares.</p>
87
- <h2>Why download Egg Inc APK Español?</h2>
88
- <p>Now that you know how Egg Inc APK Español is played, you may wonder why you should download it from APKCombo. The answer is simple: because APKCombo offers you the best possible download and play experience. Some of the advantages of downloading Egg Inc APK Español from APKCombo are:</p>
89
- <ul>
90
- <li>It is free and safe: You do not have to pay anything to download the game, nor do you risk infecting your device with viruses or malware.</li>
91
- <li>It is fast and easy: Just click the download button and follow the instructions to install the game on your device in a matter of minutes.</li>
92
- <li>It is up to date and compatible: You will always have the latest version of the game available, as well as the option to choose the version that best suits your device and your preferences.</li>
93
- <li>It is fun and unlimited: You can enjoy the game without restrictions or limitations, and access all the features and content it offers.</li>
94
- </ul>
95
- <h2>Conclusion</h2>
96
- <p>Egg Inc APK Español is a simulation and strategy game about hens that will give you hours of fun and entertainment. You can build your own egg farm, invest in research and space exploration, prestige and unlock new eggs, and much more. In addition, if you download the game from APKCombo, you can enjoy all the advantages this platform offers, such as speed, safety, up-to-date versions and compatibility. What are you waiting for? Download Egg Inc APK Español today and start living the adventure!</p>
97
- <h2>Frequently asked questions</h2>
98
- <h3>Is it safe to download Egg Inc APK Español from APKCombo?</h3>
99
- <p>Yes, it is safe to download Egg Inc APK Español from APKCombo. APKCombo is a reliable platform that checks every file it offers to make sure it contains no viruses or malware. In addition, APKCombo respects your privacy and does not collect or share your personal data.</p>
100
- <h3>What does my device need to play Egg Inc APK Español?</h3>
101
- <p>To play Egg Inc APK Español you need a device running Android 7.0 or later and 89 MB of free space in your internal or external storage. The game does not require an internet connection to run, but it does need one to access some optional features such as ads or daily missions.</p>
102
- <h3>What egg types are there in Egg Inc APK Español?</h3>
103
- <p>In Egg Inc APK Español there are 19 different egg types, each with a base value and special properties that affect the game. For example, the edible egg is the most basic and is worth 0.25 dollars, while the antimatter egg is the most advanced and is worth 1.8 billion dollars. The full list of egg types is as follows:</p>
104
- <table>
105
- <tr>
106
- <th>Egg type</th>
107
- <th>Base value</th>
108
- <th>Special property</th>
109
- </tr>
110
- <tr>
111
- <td>Edible</td>
112
- <td>0.25 $</td>
113
- <td>None</td>
114
- </tr>
115
- <tr>
116
- <td>Superfood</td>
117
- <td>1.25 $</td>
118
- <td>Increases hen happiness by 10%</td>
119
- </tr>
120
- <tr>
121
- <td>Medical</td>
122
- <td>6.25 $</td>
123
- <td>Increases habitability by 10%</td>
124
- </tr>
125
- <tr>
126
- <td>Rocket</td>
127
- <td>30 $</td>
128
- <td>Increases hatching speed by 10%</td>
129
- </tr>
130
- <tr>
131
- <td>Super material</td>
132
- <td>150 $</td>
133
- <td>Increases egg quality by 10%</td>
134
- </tr>
135
- <tr>
136
- <td>Fusion</td>
137
- <td>700 $</td>
138
- <td>Increases egg quantity by 10%</td>
139
- </tr>
140
- <tr> <td>Quantum</td>
141
- <td>3,000 $</td>
142
- <td>Increases egg value by 10% per hen</td>
143
- </tr>
144
- <tr>
145
- <td>Immortality</td>
146
- <td>12,500 $</td>
147
- <td>Increases hen lifespan by 10%</td>
148
- </tr>
149
- <tr>
150
- <td>Tachyon</td>
151
- <td>50,000 $</td>
152
- <td>Increases hatching speed by 20%</td>
153
- </tr>
154
- <tr>
155
- <td>Graviton</td>
156
- <td>175,000 $</td>
157
- <td>Increases gravity by 10%</td>
158
- </tr>
159
- <tr>
160
- <td>Dilithium</td>
161
- <td>525,000 $</td>
162
- <td>Increases rocket power by 10%</td>
163
- </tr>
164
- <tr>
165
- <td>Protophase</td>
166
- <td>1.5 M$</td>
167
- <td>Increases egg quality by 20%</td>
168
- </tr>
169
- <tr> <td>Dark matter</td>
170
- <td>4.5 M$</td>
171
- <td>Increases egg value by 20% per hen</td>
172
- </tr>
173
- <tr>
174
- <td>AI</td>
175
- <td>15 M$</td>
176
- <td>Increases hen intelligence by 10%</td>
177
- </tr>
178
- <tr>
179
- <td>Mist</td>
180
- <td>50 M$</td>
181
- <td>Increases habitability by 20%</td>
182
- </tr>
183
- <tr>
184
- <td>Terraforming</td>
185
- <td>150 M$</td>
186
- <td>Increases hen happiness by 20%</td>
187
- </tr>
188
- <tr>
189
- <td>Antimatter</td>
190
- <td>1.8 B$</td>
191
- <td>Increases egg quantity by 20%</td>
192
- </tr>
193
- </table>
194
- <h3>How can I cooperate with other players in Egg Inc APK Español?</h3>
195
- <p>One way to cooperate with other players in Egg Inc APK Español is to take part in co-op contracts. These are special challenges that ask you to produce a set amount of eggs of a specific type within a limited time. To do so, you can join a team with other players or create your own and invite your friends. By completing co-op contracts you can earn rewards such as propulsion eggs, golden eggs, artifacts and more.</p>
196
- <h3>What are cards and how are they used in Egg Inc APK Español?</h3>
197
- <p>Cards are collectible items that give you upgrades and bonuses for your game. You can get cards in several ways, such as buying them with golden eggs, winning them in co-op contracts, or finding them in mystery boxes. There are four card types: common, rare, epic and legendary. Each card has a different effect, such as increasing egg value, reducing the cost of upgrades, or speeding up production. You can use cards by activating their effects or by combining them to create more powerful cards.</p><br />
198
- <br />
199
- <br />
 
spaces/4H17Joycelyn/text_generater/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Text Generater
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/52Hz/SRMNet_AWGN_denoising/app.py DELETED
@@ -1,37 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from PIL import Image
4
-
5
-
6
- os.system(
7
- 'wget https://github.com/FanChiMao/SRMNet/releases/download/0.0/AWGN_denoising_SRMNet.pth -P experiments/pretrained_models')
8
-
9
-
10
- def inference(img):
11
- os.system('mkdir test')
12
- #basewidth = 512
13
- #wpercent = (basewidth / float(img.size[0]))
14
- #hsize = int((float(img.size[1]) * float(wpercent)))
15
- #img = img.resize((basewidth, hsize), Image.ANTIALIAS)
16
- img.save("test/1.png", "PNG")
17
- os.system(
18
- 'python main_test_SRMNet.py --input_dir test --weights experiments/pretrained_models/AWGN_denoising_SRMNet.pth')
19
- return 'result/1.png'
20
-
21
-
22
- title = "Selective Residual M-Net for Real-world Image Denoising"
23
- description = "Gradio demo for SRMNet. SRMNet has competitive performance results on two synthetic and two real-world noisy datasets in terms of quantitative metrics and visual quality. See the paper and project page for detailed results below. Here, we provide a demo for AWGN image denoising. To use it, simply upload your image, or click one of the examples to load them. Reference from: https://huggingface.co/akhaliq"
24
- article = "<p style='text-align: center'><a href='https://ieeexplore.ieee.org/document/9909521' target='_blank'>Selective Residual M-Net</a> | <a href='https://github.com/FanChiMao/SRMNet' target='_blank'>Github Repo</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=52Hz_SRMNet_AWGN_denoising' alt='visitor badge'></center>"
25
-
26
- examples = [['set5/baby.png'], ['set5/bird.png'],['set5/butterfly.png'],['set5/head.png'],['set5/woman.png']]
27
- gr.Interface(
28
- inference,
29
- [gr.inputs.Image(type="pil", label="Input")],
30
- gr.outputs.Image(type="filepath", label="Output"),
31
- title=title,
32
- description=description,
33
- article=article,
34
- allow_flagging=False,
35
- allow_screenshot=False,
36
- examples=examples
37
- ).launch(debug=True)
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/attention.py DELETED
@@ -1,261 +0,0 @@
1
- from inspect import isfunction
2
- import math
3
- import torch
4
- import torch.nn.functional as F
5
- from torch import nn, einsum
6
- from einops import rearrange, repeat
7
-
8
- from ldm.modules.diffusionmodules.util import checkpoint
9
-
10
-
11
- def exists(val):
12
- return val is not None
13
-
14
-
15
- def uniq(arr):
16
- return{el: True for el in arr}.keys()
17
-
18
-
19
- def default(val, d):
20
- if exists(val):
21
- return val
22
- return d() if isfunction(d) else d
23
-
24
-
25
- def max_neg_value(t):
26
- return -torch.finfo(t.dtype).max
27
-
28
-
29
- def init_(tensor):
30
- dim = tensor.shape[-1]
31
- std = 1 / math.sqrt(dim)
32
- tensor.uniform_(-std, std)
33
- return tensor
34
-
35
-
36
- # feedforward
37
- class GEGLU(nn.Module):
38
- def __init__(self, dim_in, dim_out):
39
- super().__init__()
40
- self.proj = nn.Linear(dim_in, dim_out * 2)
41
-
42
- def forward(self, x):
43
- x, gate = self.proj(x).chunk(2, dim=-1)
44
- return x * F.gelu(gate)
45
-
46
-
47
- class FeedForward(nn.Module):
48
- def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
49
- super().__init__()
50
- inner_dim = int(dim * mult)
51
- dim_out = default(dim_out, dim)
52
- project_in = nn.Sequential(
53
- nn.Linear(dim, inner_dim),
54
- nn.GELU()
55
- ) if not glu else GEGLU(dim, inner_dim)
56
-
57
- self.net = nn.Sequential(
58
- project_in,
59
- nn.Dropout(dropout),
60
- nn.Linear(inner_dim, dim_out)
61
- )
62
-
63
- def forward(self, x):
64
- return self.net(x)
65
-
66
-
67
- def zero_module(module):
68
- """
69
- Zero out the parameters of a module and return it.
70
- """
71
- for p in module.parameters():
72
- p.detach().zero_()
73
- return module
74
-
75
-
76
- def Normalize(in_channels):
77
- return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
78
-
79
-
80
- class LinearAttention(nn.Module):
81
- def __init__(self, dim, heads=4, dim_head=32):
82
- super().__init__()
83
- self.heads = heads
84
- hidden_dim = dim_head * heads
85
- self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
86
- self.to_out = nn.Conv2d(hidden_dim, dim, 1)
87
-
88
- def forward(self, x):
89
- b, c, h, w = x.shape
90
- qkv = self.to_qkv(x)
91
- q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
92
- k = k.softmax(dim=-1)
93
- context = torch.einsum('bhdn,bhen->bhde', k, v)
94
- out = torch.einsum('bhde,bhdn->bhen', context, q)
95
- out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
96
- return self.to_out(out)
97
-
98
-
99
- class SpatialSelfAttention(nn.Module):
100
- def __init__(self, in_channels):
101
- super().__init__()
102
- self.in_channels = in_channels
103
-
104
- self.norm = Normalize(in_channels)
105
- self.q = torch.nn.Conv2d(in_channels,
106
- in_channels,
107
- kernel_size=1,
108
- stride=1,
109
- padding=0)
110
- self.k = torch.nn.Conv2d(in_channels,
111
- in_channels,
112
- kernel_size=1,
113
- stride=1,
114
- padding=0)
115
- self.v = torch.nn.Conv2d(in_channels,
116
- in_channels,
117
- kernel_size=1,
118
- stride=1,
119
- padding=0)
120
- self.proj_out = torch.nn.Conv2d(in_channels,
121
- in_channels,
122
- kernel_size=1,
123
- stride=1,
124
- padding=0)
125
-
126
- def forward(self, x):
127
- h_ = x
128
- h_ = self.norm(h_)
129
- q = self.q(h_)
130
- k = self.k(h_)
131
- v = self.v(h_)
132
-
133
- # compute attention
134
- b,c,h,w = q.shape
135
- q = rearrange(q, 'b c h w -> b (h w) c')
136
- k = rearrange(k, 'b c h w -> b c (h w)')
137
- w_ = torch.einsum('bij,bjk->bik', q, k)
138
-
139
- w_ = w_ * (int(c)**(-0.5))
140
- w_ = torch.nn.functional.softmax(w_, dim=2)
141
-
142
- # attend to values
143
- v = rearrange(v, 'b c h w -> b c (h w)')
144
- w_ = rearrange(w_, 'b i j -> b j i')
145
- h_ = torch.einsum('bij,bjk->bik', v, w_)
146
- h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
147
- h_ = self.proj_out(h_)
148
-
149
- return x+h_
150
-
151
-
152
- class CrossAttention(nn.Module):
153
- def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):# if context_dim is set, this is cross-attention rather than self-attention
154
- super().__init__()
155
- inner_dim = dim_head * heads # inner_dim == SpatialTransformer.model_channels
156
- context_dim = default(context_dim, query_dim)
157
-
158
- self.scale = dim_head ** -0.5
159
- self.heads = heads
160
-
161
- self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
162
- self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
163
- self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
164
-
165
- self.to_out = nn.Sequential(
166
- nn.Linear(inner_dim, query_dim),
167
- nn.Dropout(dropout)
168
- )
169
-
170
- def forward(self, x, context=None, mask=None):# x:(b,h*w,c), context:(b,seq_len,context_dim)
171
- h = self.heads
172
-
173
- q = self.to_q(x)# q:(b,h*w,inner_dim)
174
- context = default(context, x)
175
- k = self.to_k(context)# (b,seq_len,inner_dim)
176
- v = self.to_v(context)# (b,seq_len,inner_dim)
177
-
178
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))# n is seq_len for k and v
179
-
180
- sim = einsum('b i d, b j d -> b i j', q, k) * self.scale # (b*head,h*w,seq_len)
181
-
182
- if exists(mask):# false
183
- mask = rearrange(mask, 'b ... -> b (...)')
184
- max_neg_value = -torch.finfo(sim.dtype).max
185
- mask = repeat(mask, 'b j -> (b h) () j', h=h)
186
- sim.masked_fill_(~mask, max_neg_value)
187
-
188
- # attention, what we cannot get enough of
189
- attn = sim.softmax(dim=-1)
190
-
191
- out = einsum('b i j, b j d -> b i d', attn, v)# (b*head,h*w,inner_dim/head)
192
- out = rearrange(out, '(b h) n d -> b n (h d)', h=h)# (b,h*w,inner_dim)
193
- return self.to_out(out)
194
-
195
-
196
- class BasicTransformerBlock(nn.Module):
197
- def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
198
- super().__init__()
199
- self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention
200
- self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
201
- self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
202
- heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none
203
- self.norm1 = nn.LayerNorm(dim)
204
- self.norm2 = nn.LayerNorm(dim)
205
- self.norm3 = nn.LayerNorm(dim)
206
- self.checkpoint = checkpoint
207
-
208
- def forward(self, x, context=None):
209
- return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
210
-
211
- def _forward(self, x, context=None):
212
- x = self.attn1(self.norm1(x)) + x
213
- x = self.attn2(self.norm2(x), context=context) + x
214
- x = self.ff(self.norm3(x)) + x
215
- return x
216
-
217
-
218
- class SpatialTransformer(nn.Module):
219
- """
220
- Transformer block for image-like data.
221
- First, project the input (aka embedding)
222
- and reshape to b, t, d.
223
- Then apply standard transformer action.
224
- Finally, reshape to image
225
- """
226
- def __init__(self, in_channels, n_heads, d_head,
227
- depth=1, dropout=0., context_dim=None):
228
- super().__init__()
229
- self.in_channels = in_channels
230
- inner_dim = n_heads * d_head
231
- self.norm = Normalize(in_channels)
232
-
233
- self.proj_in = nn.Conv2d(in_channels,
234
- inner_dim,
235
- kernel_size=1,
236
- stride=1,
237
- padding=0)
238
-
239
- self.transformer_blocks = nn.ModuleList(
240
- [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
241
- for d in range(depth)]
242
- )
243
-
244
- self.proj_out = zero_module(nn.Conv2d(inner_dim,
245
- in_channels,
246
- kernel_size=1,
247
- stride=1,
248
- padding=0))
249
-
250
- def forward(self, x, context=None):
251
- # note: if no context is given, cross-attention defaults to self-attention
252
- b, c, h, w = x.shape # such as [2,320,10,106]
253
- x_in = x
254
- x = self.norm(x)# group norm
255
- x = self.proj_in(x)# no shape change
256
- x = rearrange(x, 'b c h w -> b (h w) c')
257
- for block in self.transformer_blocks:
258
- x = block(x, context=context)# context shape [b,seq_len=77,context_dim]
259
- x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
260
- x = self.proj_out(x)
261
- return x + x_in
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/alias_free_torch/filter.py DELETED
@@ -1,95 +0,0 @@
1
- # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
- import math
8
-
9
- if 'sinc' in dir(torch):
10
- sinc = torch.sinc
11
- else:
12
- # This code is adopted from adefossez's julius.core.sinc under the MIT License
13
- # https://adefossez.github.io/julius/julius/core.html
14
- # LICENSE is in incl_licenses directory.
15
- def sinc(x: torch.Tensor):
16
- """
17
- Implementation of sinc, i.e. sin(pi * x) / (pi * x)
18
- __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
19
- """
20
- return torch.where(x == 0,
21
- torch.tensor(1., device=x.device, dtype=x.dtype),
22
- torch.sin(math.pi * x) / math.pi / x)
23
-
24
-
25
- # This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
26
- # https://adefossez.github.io/julius/julius/lowpass.html
27
- # LICENSE is in incl_licenses directory.
28
- def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size]
29
- even = (kernel_size % 2 == 0)
30
- half_size = kernel_size // 2
31
-
32
- #For kaiser window
33
- delta_f = 4 * half_width
34
- A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
35
- if A > 50.:
36
- beta = 0.1102 * (A - 8.7)
37
- elif A >= 21.:
38
- beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
39
- else:
40
- beta = 0.
41
- window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
42
-
43
- # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
44
- if even:
45
- time = (torch.arange(-half_size, half_size) + 0.5)
46
- else:
47
- time = torch.arange(kernel_size) - half_size
48
- if cutoff == 0:
49
- filter_ = torch.zeros_like(time)
50
- else:
51
- filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
52
- # Normalize filter to have sum = 1, otherwise we will have a small leakage
53
- # of the constant component in the input signal.
54
- filter_ /= filter_.sum()
55
- filter = filter_.view(1, 1, kernel_size)
56
-
57
- return filter
58
-
59
-
60
- class LowPassFilter1d(nn.Module):
61
- def __init__(self,
62
- cutoff=0.5,
63
- half_width=0.6,
64
- stride: int = 1,
65
- padding: bool = True,
66
- padding_mode: str = 'replicate',
67
- kernel_size: int = 12):
68
- # kernel_size should be even number for stylegan3 setup,
69
- # in this implementation, odd number is also possible.
70
- super().__init__()
71
- if cutoff < -0.:
72
- raise ValueError("Minimum cutoff must be larger than zero.")
73
- if cutoff > 0.5:
74
- raise ValueError("A cutoff above 0.5 does not make sense.")
75
- self.kernel_size = kernel_size
76
- self.even = (kernel_size % 2 == 0)
77
- self.pad_left = kernel_size // 2 - int(self.even)
78
- self.pad_right = kernel_size // 2
79
- self.stride = stride
80
- self.padding = padding
81
- self.padding_mode = padding_mode
82
- filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
83
- self.register_buffer("filter", filter)
84
-
85
- #input [B, C, T]
86
- def forward(self, x):
87
- _, C, _ = x.shape
88
-
89
- if self.padding:
90
- x = F.pad(x, (self.pad_left, self.pad_right),
91
- mode=self.padding_mode)
92
- out = F.conv1d(x, self.filter.expand(C, -1, -1),
93
- stride=self.stride, groups=C)
94
-
95
- return out
 
spaces/ALSv/FSW/roop/face_reference.py DELETED
@@ -1,21 +0,0 @@
1
- from typing import Optional
2
-
3
- from roop.typing import Face
4
-
5
- FACE_REFERENCE = None
6
-
7
-
8
- def get_face_reference() -> Optional[Face]:
9
- return FACE_REFERENCE
10
-
11
-
12
- def set_face_reference(face: Face) -> None:
13
- global FACE_REFERENCE
14
-
15
- FACE_REFERENCE = face
16
-
17
-
18
- def clear_face_reference() -> None:
19
- global FACE_REFERENCE
20
-
21
- FACE_REFERENCE = None
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py DELETED
@@ -1,182 +0,0 @@
1
- _base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py'
2
-
3
- # ========================modified parameters========================
4
- # -----data related-----
5
- img_scale = (1280, 1280) # height, width
6
- num_classes = 80 # Number of classes for classification
7
- # Config of batch shapes. Only on val
8
- # It means not used if batch_shapes_cfg is None.
9
- batch_shapes_cfg = dict(
10
- img_size=img_scale[
11
- 0], # The image scale of padding should be divided by pad_size_divisor
12
- size_divisor=64) # Additional paddings for pixel scale
13
- tta_img_scales = [(1280, 1280), (1024, 1024), (1536, 1536)]
14
-
15
- # -----model related-----
16
- # Basic size of multi-scale prior box
17
- anchors = [
18
- [(19, 27), (44, 40), (38, 94)], # P3/8
19
- [(96, 68), (86, 152), (180, 137)], # P4/16
20
- [(140, 301), (303, 264), (238, 542)], # P5/32
21
- [(436, 615), (739, 380), (925, 792)] # P6/64
22
- ]
23
- strides = [8, 16, 32, 64] # Strides of multi-scale prior box
24
- num_det_layers = 4 # # The number of model output scales
25
- norm_cfg = dict(type='BN', momentum=0.03, eps=0.001)
26
-
27
- # Data augmentation
28
- max_translate_ratio = 0.2 # YOLOv5RandomAffine
29
- scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine
30
- mixup_prob = 0.15 # YOLOv5MixUp
31
- randchoice_mosaic_prob = [0.8, 0.2]
32
- mixup_alpha = 8.0 # YOLOv5MixUp
33
- mixup_beta = 8.0 # YOLOv5MixUp
34
-
35
- # -----train val related-----
36
- loss_cls_weight = 0.3
37
- loss_bbox_weight = 0.05
38
- loss_obj_weight = 0.7
39
- obj_level_weights = [4.0, 1.0, 0.25, 0.06]
40
- simota_candidate_topk = 20
41
-
42
- # The only difference between P6 and P5 in terms of
43
- # hyperparameters is lr_factor
44
- lr_factor = 0.2
45
-
46
- # ===============================Unmodified in most cases====================
47
- pre_transform = _base_.pre_transform
48
-
49
- model = dict(
50
- backbone=dict(arch='W', out_indices=(2, 3, 4, 5)),
51
- neck=dict(
52
- in_channels=[256, 512, 768, 1024],
53
- out_channels=[128, 256, 384, 512],
54
- use_maxpool_in_downsample=False,
55
- use_repconv_outs=False),
56
- bbox_head=dict(
57
- head_module=dict(
58
- type='YOLOv7p6HeadModule',
59
- in_channels=[128, 256, 384, 512],
60
- featmap_strides=strides,
61
- norm_cfg=norm_cfg,
62
- act_cfg=dict(type='SiLU', inplace=True)),
63
- prior_generator=dict(base_sizes=anchors, strides=strides),
64
- simota_candidate_topk=simota_candidate_topk, # note
65
- # scaled based on number of detection layers
66
- loss_cls=dict(loss_weight=loss_cls_weight *
67
- (num_classes / 80 * 3 / num_det_layers)),
68
- loss_bbox=dict(loss_weight=loss_bbox_weight * (3 / num_det_layers)),
69
- loss_obj=dict(loss_weight=loss_obj_weight *
70
- ((img_scale[0] / 640)**2 * 3 / num_det_layers)),
71
- obj_level_weights=obj_level_weights))
72
-
73
- mosiac4_pipeline = [
74
- dict(
75
- type='Mosaic',
76
- img_scale=img_scale,
77
- pad_val=114.0,
78
- pre_transform=pre_transform),
79
- dict(
80
- type='YOLOv5RandomAffine',
81
- max_rotate_degree=0.0,
82
- max_shear_degree=0.0,
83
- max_translate_ratio=max_translate_ratio, # note
84
- scaling_ratio_range=scaling_ratio_range, # note
85
- # img_scale is (width, height)
86
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
87
- border_val=(114, 114, 114)),
88
- ]
89
-
90
- mosiac9_pipeline = [
91
- dict(
92
- type='Mosaic9',
93
- img_scale=img_scale,
94
- pad_val=114.0,
95
- pre_transform=pre_transform),
96
- dict(
97
- type='YOLOv5RandomAffine',
98
- max_rotate_degree=0.0,
99
- max_shear_degree=0.0,
100
- max_translate_ratio=max_translate_ratio, # note
101
- scaling_ratio_range=scaling_ratio_range, # note
102
- # img_scale is (width, height)
103
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
104
- border_val=(114, 114, 114)),
105
- ]
106
-
107
- randchoice_mosaic_pipeline = dict(
108
- type='RandomChoice',
109
- transforms=[mosiac4_pipeline, mosiac9_pipeline],
110
- prob=randchoice_mosaic_prob)
111
-
112
- train_pipeline = [
113
- *pre_transform,
114
- randchoice_mosaic_pipeline,
115
- dict(
116
- type='YOLOv5MixUp',
117
- alpha=mixup_alpha, # note
118
- beta=mixup_beta, # note
119
- prob=mixup_prob,
120
- pre_transform=[*pre_transform, randchoice_mosaic_pipeline]),
121
- dict(type='YOLOv5HSVRandomAug'),
122
- dict(type='mmdet.RandomFlip', prob=0.5),
123
- dict(
124
- type='mmdet.PackDetInputs',
125
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
126
- 'flip_direction'))
127
- ]
128
- train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
129
-
130
- test_pipeline = [
131
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
132
- dict(type='YOLOv5KeepRatioResize', scale=img_scale),
133
- dict(
134
- type='LetterResize',
135
- scale=img_scale,
136
- allow_scale_up=False,
137
- pad_val=dict(img=114)),
138
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
139
- dict(
140
- type='mmdet.PackDetInputs',
141
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
142
- 'scale_factor', 'pad_param'))
143
- ]
144
- val_dataloader = dict(
145
- dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg))
146
- test_dataloader = val_dataloader
147
-
148
- default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
149
-
150
- # Config for Test Time Augmentation. (TTA)
151
- _multiscale_resize_transforms = [
152
- dict(
153
- type='Compose',
154
- transforms=[
155
- dict(type='YOLOv5KeepRatioResize', scale=s),
156
- dict(
157
- type='LetterResize',
158
- scale=s,
159
- allow_scale_up=False,
160
- pad_val=dict(img=114))
161
- ]) for s in tta_img_scales
162
- ]
163
-
164
- tta_pipeline = [
165
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
166
- dict(
167
- type='TestTimeAug',
168
- transforms=[
169
- _multiscale_resize_transforms,
170
- [
171
- dict(type='mmdet.RandomFlip', prob=1.),
172
- dict(type='mmdet.RandomFlip', prob=0.)
173
- ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)],
174
- [
175
- dict(
176
- type='mmdet.PackDetInputs',
177
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
178
- 'scale_factor', 'pad_param', 'flip',
179
- 'flip_direction'))
180
- ]
181
- ])
182
- ]
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/default_runtime.py DELETED
@@ -1,51 +0,0 @@
1
- # defaults to use registries in mmpretrain
2
- default_scope = 'mmpretrain'
3
-
4
- # configure default hooks
5
- default_hooks = dict(
6
- # record the time of every iteration.
7
- timer=dict(type='IterTimerHook'),
8
-
9
- # print log every 100 iterations.
10
- logger=dict(type='LoggerHook', interval=100),
11
-
12
- # enable the parameter scheduler.
13
- param_scheduler=dict(type='ParamSchedulerHook'),
14
-
15
- # save checkpoint per epoch.
16
- checkpoint=dict(type='CheckpointHook', interval=1),
17
-
18
- # set sampler seed in distributed environment.
19
- sampler_seed=dict(type='DistSamplerSeedHook'),
20
-
21
- # validation results visualization, set True to enable it.
22
- visualization=dict(type='VisualizationHook', enable=False),
23
- )
24
-
25
- # configure environment
26
- env_cfg = dict(
27
- # whether to enable cudnn benchmark
28
- cudnn_benchmark=False,
29
-
30
- # set multi process parameters
31
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
32
-
33
- # set distributed parameters
34
- dist_cfg=dict(backend='nccl'),
35
- )
36
-
37
- # set visualizer
38
- vis_backends = [dict(type='LocalVisBackend')]
39
- visualizer = dict(type='UniversalVisualizer', vis_backends=vis_backends)
40
-
41
- # set log level
42
- log_level = 'INFO'
43
-
44
- # load from which checkpoint
45
- load_from = None
46
-
47
- # whether to resume training from the loaded checkpoint
48
- resume = False
49
-
50
- # Defaults to use random seed and disable `deterministic`
51
- randomness = dict(seed=None, deterministic=False)
 
spaces/Abhaykoul/HelpingAI-T3/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/AbortedGeneration.ts DELETED
@@ -1,8 +0,0 @@
1
- // Ideally shouldn't be needed, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
2
-
3
- import type { Conversation } from "./Conversation";
4
- import type { Timestamps } from "./Timestamps";
5
-
6
- export interface AbortedGeneration extends Timestamps {
7
- conversationId: Conversation["_id"];
8
- }
 
spaces/Aditya9790/yolo7-object-tracking/utils/plots.py DELETED
@@ -1,489 +0,0 @@
1
- # Plotting utils
2
-
3
- import glob
4
- import math
5
- import os
6
- import random
7
- from copy import copy
8
- from pathlib import Path
9
-
10
- import cv2
11
- import matplotlib
12
- import matplotlib.pyplot as plt
13
- import numpy as np
14
- import pandas as pd
15
- import seaborn as sns
16
- import torch
17
- import yaml
18
- from PIL import Image, ImageDraw, ImageFont
19
- from scipy.signal import butter, filtfilt
20
-
21
- from utils.general import xywh2xyxy, xyxy2xywh
22
- from utils.metrics import fitness
23
-
24
- # Settings
25
- matplotlib.rc('font', **{'size': 11})
26
- matplotlib.use('Agg') # for writing to files only
27
-
28
-
29
- def color_list():
30
- # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
31
- def hex2rgb(h):
32
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
33
-
34
- return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
35
-
36
-
37
- def hist2d(x, y, n=100):
38
- # 2d histogram used in labels.png and evolve.png
39
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
40
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
41
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
42
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
43
- return np.log(hist[xidx, yidx])
44
-
45
-
46
- def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
47
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
48
- def butter_lowpass(cutoff, fs, order):
49
- nyq = 0.5 * fs
50
- normal_cutoff = cutoff / nyq
51
- return butter(order, normal_cutoff, btype='low', analog=False)
52
-
53
- b, a = butter_lowpass(cutoff, fs, order=order)
54
- return filtfilt(b, a, data) # forward-backward filter
55
-
56
-
57
- def plot_one_box(x, img, color=None, label=None, line_thickness=3):
58
- # Plots one bounding box on image img
59
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
60
- color = color or [random.randint(0, 255) for _ in range(3)]
61
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
62
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
63
- if label:
64
- tf = max(tl - 1, 1) # font thickness
65
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
66
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
67
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
68
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
69
-
70
-
71
- def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
72
- img = Image.fromarray(img)
73
- draw = ImageDraw.Draw(img)
74
- line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
75
- draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
76
- if label:
77
- fontsize = max(round(max(img.size) / 40), 12)
78
- font = ImageFont.truetype("Arial.ttf", fontsize)
79
- txt_width, txt_height = font.getsize(label)
80
- draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
81
- draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
82
- return np.asarray(img)
83
-
84
-
85
- def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
86
- # Compares the two methods for width-height anchor multiplication
87
- # https://github.com/ultralytics/yolov3/issues/168
88
- x = np.arange(-4.0, 4.0, .1)
89
- ya = np.exp(x)
90
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
91
-
92
- fig = plt.figure(figsize=(6, 3), tight_layout=True)
93
- plt.plot(x, ya, '.-', label='YOLOv3')
94
- plt.plot(x, yb ** 2, '.-', label='YOLOR ^2')
95
- plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6')
96
- plt.xlim(left=-4, right=4)
97
- plt.ylim(bottom=0, top=6)
98
- plt.xlabel('input')
99
- plt.ylabel('output')
100
- plt.grid()
101
- plt.legend()
102
- fig.savefig('comparison.png', dpi=200)
103
-
104
-
105
- def output_to_target(output):
106
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
107
- targets = []
108
- for i, o in enumerate(output):
109
- for *box, conf, cls in o.cpu().numpy():
110
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
111
- return np.array(targets)
112
-
113
-
114
- def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
115
- # Plot image grid with labels
116
-
117
- if isinstance(images, torch.Tensor):
118
- images = images.cpu().float().numpy()
119
- if isinstance(targets, torch.Tensor):
120
- targets = targets.cpu().numpy()
121
-
122
- # un-normalise
123
- if np.max(images[0]) <= 1:
124
- images *= 255
125
-
126
- tl = 3 # line thickness
127
- tf = max(tl - 1, 1) # font thickness
128
- bs, _, h, w = images.shape # batch size, _, height, width
129
- bs = min(bs, max_subplots) # limit plot images
130
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
131
-
132
- # Check if we should resize
133
- scale_factor = max_size / max(h, w)
134
- if scale_factor < 1:
135
- h = math.ceil(scale_factor * h)
136
- w = math.ceil(scale_factor * w)
137
-
138
- colors = color_list() # list of colors
139
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
140
- for i, img in enumerate(images):
141
- if i == max_subplots: # if last batch has fewer images than we expect
142
- break
143
-
144
- block_x = int(w * (i // ns))
145
- block_y = int(h * (i % ns))
146
-
147
- img = img.transpose(1, 2, 0)
148
- if scale_factor < 1:
149
- img = cv2.resize(img, (w, h))
150
-
151
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
152
- if len(targets) > 0:
153
- image_targets = targets[targets[:, 0] == i]
154
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
155
- classes = image_targets[:, 1].astype('int')
156
- labels = image_targets.shape[1] == 6 # labels if no conf column
157
- conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
158
-
159
- if boxes.shape[1]:
160
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
161
- boxes[[0, 2]] *= w # scale to pixels
162
- boxes[[1, 3]] *= h
163
- elif scale_factor < 1: # absolute coords need scale if image scales
164
- boxes *= scale_factor
165
- boxes[[0, 2]] += block_x
166
- boxes[[1, 3]] += block_y
167
- for j, box in enumerate(boxes.T):
168
- cls = int(classes[j])
169
- color = colors[cls % len(colors)]
170
- cls = names[cls] if names else cls
171
- if labels or conf[j] > 0.25: # 0.25 conf thresh
172
- label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
173
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
174
-
175
- # Draw image filename labels
176
- if paths:
177
- label = Path(paths[i]).name[:40] # trim to 40 char
178
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
179
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
180
- lineType=cv2.LINE_AA)
181
-
182
- # Image border
183
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
184
-
185
- if fname:
186
- r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
187
- mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
188
- # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
189
- Image.fromarray(mosaic).save(fname) # PIL save
190
- return mosaic
191
-
192
-
193
- def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
194
- # Plot LR simulating training for full epochs
195
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
196
- y = []
197
- for _ in range(epochs):
198
- scheduler.step()
199
- y.append(optimizer.param_groups[0]['lr'])
200
- plt.plot(y, '.-', label='LR')
201
- plt.xlabel('epoch')
202
- plt.ylabel('LR')
203
- plt.grid()
204
- plt.xlim(0, epochs)
205
- plt.ylim(0)
206
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
207
- plt.close()
208
-
209
-
210
- def plot_test_txt(): # from utils.plots import *; plot_test()
211
- # Plot test.txt histograms
212
- x = np.loadtxt('test.txt', dtype=np.float32)
213
- box = xyxy2xywh(x[:, :4])
214
- cx, cy = box[:, 0], box[:, 1]
215
-
216
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
217
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
218
- ax.set_aspect('equal')
219
- plt.savefig('hist2d.png', dpi=300)
220
-
221
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
222
- ax[0].hist(cx, bins=600)
223
- ax[1].hist(cy, bins=600)
224
- plt.savefig('hist1d.png', dpi=200)
225
-
226
-
227
- def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
228
- # Plot targets.txt histograms
229
- x = np.loadtxt('targets.txt', dtype=np.float32).T
230
- s = ['x targets', 'y targets', 'width targets', 'height targets']
231
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
232
- ax = ax.ravel()
233
- for i in range(4):
234
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
235
- ax[i].legend()
236
- ax[i].set_title(s[i])
237
- plt.savefig('targets.jpg', dpi=200)
238
-
239
-
240
- def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
241
- # Plot study.txt generated by test.py
242
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
243
- # ax = ax.ravel()
244
-
245
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
246
- # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:
247
- for f in sorted(Path(path).glob('study*.txt')):
248
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
249
- x = np.arange(y.shape[1]) if x is None else np.array(x)
250
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
251
- # for i in range(7):
252
- # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
253
- # ax[i].set_title(s[i])
254
-
255
- j = y[3].argmax() + 1
256
- ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
257
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
258
-
259
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
260
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
261
-
262
- ax2.grid(alpha=0.2)
263
- ax2.set_yticks(np.arange(20, 60, 5))
264
- ax2.set_xlim(0, 57)
265
- ax2.set_ylim(30, 55)
266
- ax2.set_xlabel('GPU Speed (ms/img)')
267
- ax2.set_ylabel('COCO AP val')
268
- ax2.legend(loc='lower right')
269
- plt.savefig(str(Path(path).name) + '.png', dpi=300)
270
-
271
-
272
- def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
273
- # plot dataset labels
274
- print('Plotting labels... ')
275
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
276
- nc = int(c.max() + 1) # number of classes
277
- colors = color_list()
278
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
279
-
280
- # seaborn correlogram
281
- sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
282
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
283
- plt.close()
284
-
285
- # matplotlib labels
286
- matplotlib.use('svg') # faster
287
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
288
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
289
- ax[0].set_ylabel('instances')
290
- if 0 < len(names) < 30:
291
- ax[0].set_xticks(range(len(names)))
292
- ax[0].set_xticklabels(names, rotation=90, fontsize=10)
293
- else:
294
- ax[0].set_xlabel('classes')
295
- sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
296
- sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
297
-
298
- # rectangles
299
- labels[:, 1:3] = 0.5 # center
300
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
301
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
302
- for cls, *box in labels[:1000]:
303
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
304
- ax[1].imshow(img)
305
- ax[1].axis('off')
306
-
307
- for a in [0, 1, 2, 3]:
308
- for s in ['top', 'right', 'left', 'bottom']:
309
- ax[a].spines[s].set_visible(False)
310
-
311
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
312
- matplotlib.use('Agg')
313
- plt.close()
314
-
315
- # loggers
316
- for k, v in loggers.items() or {}:
317
- if k == 'wandb' and v:
318
- v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
319
-
320
-
321
- def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
322
- # Plot hyperparameter evolution results in evolve.txt
323
- with open(yaml_file) as f:
324
- hyp = yaml.load(f, Loader=yaml.SafeLoader)
325
- x = np.loadtxt('evolve.txt', ndmin=2)
326
- f = fitness(x)
327
- # weights = (f - f.min()) ** 2 # for weighted results
328
- plt.figure(figsize=(10, 12), tight_layout=True)
329
- matplotlib.rc('font', **{'size': 8})
330
- for i, (k, v) in enumerate(hyp.items()):
331
- y = x[:, i + 7]
332
- # mu = (y * weights).sum() / weights.sum() # best weighted result
333
- mu = y[f.argmax()] # best single result
334
- plt.subplot(6, 5, i + 1)
335
- plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
336
- plt.plot(mu, f.max(), 'k+', markersize=15)
337
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
338
- if i % 5 != 0:
339
- plt.yticks([])
340
- print('%15s: %.3g' % (k, mu))
341
- plt.savefig('evolve.png', dpi=200)
342
- print('\nPlot saved as evolve.png')
343
-
344
-
345
- def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
346
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
347
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
348
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
349
- files = list(Path(save_dir).glob('frames*.txt'))
350
- for fi, f in enumerate(files):
351
- try:
352
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
353
- n = results.shape[1] # number of rows
354
- x = np.arange(start, min(stop, n) if stop else n)
355
- results = results[:, x]
356
- t = (results[0] - results[0].min()) # set t0=0s
357
- results[0] = x
358
- for i, a in enumerate(ax):
359
- if i < len(results):
360
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
361
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
362
- a.set_title(s[i])
363
- a.set_xlabel('time (s)')
364
- # if fi == len(files) - 1:
365
- # a.set_ylim(bottom=0)
366
- for side in ['top', 'right']:
367
- a.spines[side].set_visible(False)
368
- else:
369
- a.remove()
370
- except Exception as e:
371
- print('Warning: Plotting error for %s; %s' % (f, e))
372
-
373
- ax[1].legend()
374
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
375
-
376
-
377
- def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
378
- # Plot training 'results*.txt', overlaying train and val losses
379
- s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
380
- t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
381
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
382
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
383
- n = results.shape[1] # number of rows
384
- x = range(start, min(stop, n) if stop else n)
385
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
386
- ax = ax.ravel()
387
- for i in range(5):
388
- for j in [i, i + 5]:
389
- y = results[j, x]
390
- ax[i].plot(x, y, marker='.', label=s[j])
391
- # y_smooth = butter_lowpass_filtfilt(y)
392
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
393
-
394
- ax[i].set_title(t[i])
395
- ax[i].legend()
396
- ax[i].set_ylabel(f) if i == 0 else None # add filename
397
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
398
-
399
-
400
- def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
401
- # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
402
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
403
- ax = ax.ravel()
404
- s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
405
- 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
406
- if bucket:
407
- # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
408
- files = ['results%g.txt' % x for x in id]
409
- c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
410
- os.system(c)
411
- else:
412
- files = list(Path(save_dir).glob('results*.txt'))
413
- assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
414
- for fi, f in enumerate(files):
415
- try:
416
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
417
- n = results.shape[1] # number of rows
418
- x = range(start, min(stop, n) if stop else n)
419
- for i in range(10):
420
- y = results[i, x]
421
- if i in [0, 1, 2, 5, 6, 7]:
422
- y[y == 0] = np.nan # don't show zero loss values
423
- # y /= y[0] # normalize
424
- label = labels[fi] if len(labels) else f.stem
425
- ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
426
- ax[i].set_title(s[i])
427
- # if i in [5, 6, 7]: # share train and val loss y axes
428
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
429
- except Exception as e:
430
- print('Warning: Plotting error for %s; %s' % (f, e))
431
-
432
- ax[1].legend()
433
- fig.savefig(Path(save_dir) / 'results.png', dpi=200)
434
-
435
-
436
- def output_to_keypoint(output):
437
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
438
- targets = []
439
- for i, o in enumerate(output):
440
- kpts = o[:,6:]
441
- o = o[:,:6]
442
- for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()):
443
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])])
444
- return np.array(targets)
445
-
446
-
447
- def plot_skeleton_kpts(im, kpts, steps, orig_shape=None):
448
- #Plot the skeleton and keypoints for the coco dataset
449
- palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
450
- [230, 230, 0], [255, 153, 255], [153, 204, 255],
451
- [255, 102, 255], [255, 51, 255], [102, 178, 255],
452
- [51, 153, 255], [255, 153, 153], [255, 102, 102],
453
- [255, 51, 51], [153, 255, 153], [102, 255, 102],
454
- [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0],
455
- [255, 255, 255]])
456
-
457
- skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
458
- [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
459
- [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
460
-
461
- pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]]
462
- pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]]
463
- radius = 5
464
- num_kpts = len(kpts) // steps
465
-
466
- for kid in range(num_kpts):
467
- r, g, b = pose_kpt_color[kid]
468
- x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1]
469
- if not (x_coord % 640 == 0 or y_coord % 640 == 0):
470
- if steps == 3:
471
- conf = kpts[steps * kid + 2]
472
- if conf < 0.5:
473
- continue
474
- cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1)
475
-
476
- for sk_id, sk in enumerate(skeleton):
477
- r, g, b = pose_limb_color[sk_id]
478
- pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1]))
479
- pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1]))
480
- if steps == 3:
481
- conf1 = kpts[(sk[0]-1)*steps+2]
482
- conf2 = kpts[(sk[1]-1)*steps+2]
483
- if conf1<0.5 or conf2<0.5:
484
- continue
485
- if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0:
486
- continue
487
- if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0:
488
- continue
489
- cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2)
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/GetStartPoint.js DELETED
@@ -1,27 +0,0 @@
1
- import GetThumbAlignPoint from './GetThumbAlignPoint.js';
2
-
3
- const AlignLeft = Phaser.Display.Align.LEFT_CENTER;
4
- const AlignTop = Phaser.Display.Align.TOP_CENTER;
5
-
6
- var GetStartPoint = function (out) {
7
- if (out === undefined) {
8
- out = tmpPoint;
9
- }
10
- if (this.childrenMap.thumb) {
11
- var align = (this.orientation === 0) ? AlignLeft : AlignTop;
12
- GetThumbAlignPoint.call(this, align, out);
13
- } else {
14
- if (this.orientation === 0) {
15
- out.x = this.innerLeft + 1; // Add 1 pixel margin
16
- out.y = this.centerY;
17
- } else {
18
- out.x = this.centerX;
19
- out.y = this.innerTop + 1; // Add 1 pixel margin
20
- }
21
- }
22
- return out;
23
- }
24
-
25
- var tmpPoint = {};
26
-
27
- export default GetStartPoint;
 
spaces/AlexWang/lama/bin/filter_sharded_dataset.py DELETED
@@ -1,69 +0,0 @@
- #!/usr/bin/env python3
-
-
- import math
- import os
- import random
-
- import braceexpand
- import webdataset as wds
-
- DEFAULT_CATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'configs', 'places2-categories_157.txt')
-
- def is_good_key(key, cats):
-     return any(c in key for c in cats)
-
-
- def main(args):
-     if args.categories == 'nofilter':
-         good_categories = None
-     else:
-         with open(args.categories, 'r') as f:
-             good_categories = set(line.strip().split(' ')[0] for line in f if line.strip())
-
-     all_input_files = list(braceexpand.braceexpand(args.infile))
-     chunk_size = int(math.ceil(len(all_input_files) / args.n_read_streams))
-
-     input_iterators = [iter(wds.Dataset(all_input_files[start : start + chunk_size]).shuffle(args.shuffle_buffer))
-                        for start in range(0, len(all_input_files), chunk_size)]
-     output_datasets = [wds.ShardWriter(args.outpattern.format(i)) for i in range(args.n_write_streams)]
-
-     good_readers = list(range(len(input_iterators)))
-     step_i = 0
-     good_samples = 0
-     bad_samples = 0
-     while len(good_readers) > 0:
-         if step_i % args.print_freq == 0:
-             print(f'Iterations done {step_i}; readers alive {good_readers}; good samples {good_samples}; bad samples {bad_samples}')
-
-         step_i += 1
-
-         ri = random.choice(good_readers)
-         try:
-             sample = next(input_iterators[ri])
-         except StopIteration:
-             good_readers = list(set(good_readers) - {ri})
-             continue
-
-         if good_categories is not None and not is_good_key(sample['__key__'], good_categories):
-             bad_samples += 1
-             continue
-
-         wi = random.randint(0, args.n_write_streams - 1)
-         output_datasets[wi].write(sample)
-         good_samples += 1
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('--categories', type=str, default=DEFAULT_CATS_FILE)
-     aparser.add_argument('--shuffle-buffer', type=int, default=10000)
-     aparser.add_argument('--n-read-streams', type=int, default=10)
-     aparser.add_argument('--n-write-streams', type=int, default=10)
-     aparser.add_argument('--print-freq', type=int, default=1000)
-     aparser.add_argument('infile', type=str)
-     aparser.add_argument('outpattern', type=str)
-
-     main(aparser.parse_args())
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py DELETED
@@ -1,141 +0,0 @@
- # coding=utf-8
- # Copyright 2023 HuggingFace Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import unittest
-
- import numpy as np
-
- from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
- from diffusers.utils.testing_utils import (
-     is_onnx_available,
-     load_image,
-     nightly,
-     require_onnxruntime,
-     require_torch_gpu,
- )
-
- from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
-
-
- if is_onnx_available():
-     import onnxruntime as ort
-
-
- class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
-     # FIXME: add fast tests
-     pass
-
-
- @nightly
- @require_onnxruntime
- @require_torch_gpu
- class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
-     @property
-     def gpu_provider(self):
-         return (
-             "CUDAExecutionProvider",
-             {
-                 "gpu_mem_limit": "15000000000",  # 15GB
-                 "arena_extend_strategy": "kSameAsRequested",
-             },
-         )
-
-     @property
-     def gpu_options(self):
-         options = ort.SessionOptions()
-         options.enable_mem_pattern = False
-         return options
-
-     def test_inference_default_pndm(self):
-         init_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-             "/in_paint/overture-creations-5sI6fQgYIuo.png"
-         )
-         mask_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-             "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
-         )
-         pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
-             "runwayml/stable-diffusion-inpainting",
-             revision="onnx",
-             safety_checker=None,
-             feature_extractor=None,
-             provider=self.gpu_provider,
-             sess_options=self.gpu_options,
-         )
-         pipe.set_progress_bar_config(disable=None)
-
-         prompt = "A red cat sitting on a park bench"
-
-         generator = np.random.RandomState(0)
-         output = pipe(
-             prompt=prompt,
-             image=init_image,
-             mask_image=mask_image,
-             guidance_scale=7.5,
-             num_inference_steps=10,
-             generator=generator,
-             output_type="np",
-         )
-         images = output.images
-         image_slice = images[0, 255:258, 255:258, -1]
-
-         assert images.shape == (1, 512, 512, 3)
-         expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
-
-         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
-
-     def test_inference_k_lms(self):
-         init_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-             "/in_paint/overture-creations-5sI6fQgYIuo.png"
-         )
-         mask_image = load_image(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-             "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
-         )
-         lms_scheduler = LMSDiscreteScheduler.from_pretrained(
-             "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
-         )
-         pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
-             "runwayml/stable-diffusion-inpainting",
-             revision="onnx",
-             scheduler=lms_scheduler,
-             safety_checker=None,
-             feature_extractor=None,
-             provider=self.gpu_provider,
-             sess_options=self.gpu_options,
-         )
-         pipe.set_progress_bar_config(disable=None)
-
-         prompt = "A red cat sitting on a park bench"
-
-         generator = np.random.RandomState(0)
-         output = pipe(
-             prompt=prompt,
-             image=init_image,
-             mask_image=mask_image,
-             guidance_scale=7.5,
-             num_inference_steps=20,
-             generator=generator,
-             output_type="np",
-         )
-         images = output.images
-         image_slice = images[0, 255:258, 255:258, -1]
-
-         assert images.shape == (1, 512, 512, 3)
-         expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
-
-         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py DELETED
@@ -1,14 +0,0 @@
- _base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     type='CascadeRCNN',
-     pretrained='open-mmlab://resnext101_64x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=64,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         style='pytorch'))
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fsaf.py DELETED
@@ -1,17 +0,0 @@
- from ..builder import DETECTORS
- from .single_stage import SingleStageDetector
-
-
- @DETECTORS.register_module()
- class FSAF(SingleStageDetector):
-     """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
-
-     def __init__(self,
-                  backbone,
-                  neck,
-                  bbox_head,
-                  train_cfg=None,
-                  test_cfg=None,
-                  pretrained=None):
-         super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
-                                    test_cfg, pretrained)
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py DELETED
@@ -1,43 +0,0 @@
- from mmcv.cnn import ConvModule
-
- from mmdet.models.builder import HEADS
- from .fcn_mask_head import FCNMaskHead
-
-
- @HEADS.register_module()
- class HTCMaskHead(FCNMaskHead):
-
-     def __init__(self, with_conv_res=True, *args, **kwargs):
-         super(HTCMaskHead, self).__init__(*args, **kwargs)
-         self.with_conv_res = with_conv_res
-         if self.with_conv_res:
-             self.conv_res = ConvModule(
-                 self.conv_out_channels,
-                 self.conv_out_channels,
-                 1,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-
-     def init_weights(self):
-         super(HTCMaskHead, self).init_weights()
-         if self.with_conv_res:
-             self.conv_res.init_weights()
-
-     def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
-         if res_feat is not None:
-             assert self.with_conv_res
-             res_feat = self.conv_res(res_feat)
-             x = x + res_feat
-         for conv in self.convs:
-             x = conv(x)
-         res_feat = x
-         outs = []
-         if return_logits:
-             x = self.upsample(x)
-             if self.upsample_method == 'deconv':
-                 x = self.relu(x)
-             mask_pred = self.conv_logits(x)
-             outs.append(mask_pred)
-         if return_feat:
-             outs.append(res_feat)
-         return outs if len(outs) > 1 else outs[0]
 
spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/analyze_results.py DELETED
@@ -1,202 +0,0 @@
- import argparse
- import os.path as osp
-
- import mmcv
- import numpy as np
- from mmcv import Config, DictAction
-
- from mmdet.core.evaluation import eval_map
- from mmdet.core.visualization import imshow_gt_det_bboxes
- from mmdet.datasets import build_dataset, get_loading_pipeline
-
-
- def bbox_map_eval(det_result, annotation):
-     """Evaluate mAP of single image det result.
-
-     Args:
-         det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
-             The outer list indicates images, and the inner list indicates
-             per-class detected bboxes.
-         annotation (dict): Ground truth annotations where keys of
-             annotations are:
-
-             - bboxes: numpy array of shape (n, 4)
-             - labels: numpy array of shape (n, )
-             - bboxes_ignore (optional): numpy array of shape (k, 4)
-             - labels_ignore (optional): numpy array of shape (k, )
-
-     Returns:
-         float: mAP
-     """
-
-     # use only bbox det result
-     if isinstance(det_result, tuple):
-         bbox_det_result = [det_result[0]]
-     else:
-         bbox_det_result = [det_result]
-     # mAP
-     iou_thrs = np.linspace(
-         .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
-     mean_aps = []
-     for thr in iou_thrs:
-         mean_ap, _ = eval_map(
-             bbox_det_result, [annotation], iou_thr=thr, logger='silent')
-         mean_aps.append(mean_ap)
-     return sum(mean_aps) / len(mean_aps)
-
-
- class ResultVisualizer(object):
-     """Display and save evaluation results.
-
-     Args:
-         show (bool): Whether to show the image. Default: True
-         wait_time (float): Value of waitKey param. Default: 0.
-         score_thr (float): Minimum score of bboxes to be shown.
-             Default: 0
-     """
-
-     def __init__(self, show=False, wait_time=0, score_thr=0):
-         self.show = show
-         self.wait_time = wait_time
-         self.score_thr = score_thr
-
-     def _save_image_gts_results(self, dataset, results, mAPs, out_dir=None):
-         mmcv.mkdir_or_exist(out_dir)
-
-         for mAP_info in mAPs:
-             index, mAP = mAP_info
-             data_info = dataset.prepare_train_img(index)
-
-             # calc save file path
-             filename = data_info['filename']
-             if data_info['img_prefix'] is not None:
-                 filename = osp.join(data_info['img_prefix'], filename)
-             else:
-                 filename = data_info['filename']
-             fname, name = osp.splitext(osp.basename(filename))
-             save_filename = fname + '_' + str(round(mAP, 3)) + name
-             out_file = osp.join(out_dir, save_filename)
-             imshow_gt_det_bboxes(
-                 data_info['img'],
-                 data_info,
-                 results[index],
-                 dataset.CLASSES,
-                 show=self.show,
-                 score_thr=self.score_thr,
-                 wait_time=self.wait_time,
-                 out_file=out_file)
-
-     def evaluate_and_show(self,
-                           dataset,
-                           results,
-                           topk=20,
-                           show_dir='work_dir',
-                           eval_fn=None):
-         """Evaluate and show results.
-
-         Args:
-             dataset (Dataset): A PyTorch dataset.
-             results (list): Det results from test results pkl file
-             topk (int): Number of the highest topk and
-                 lowest topk after evaluation index sorting. Default: 20
-             show_dir (str, optional): The filename to write the image.
-                 Default: 'work_dir'
-             eval_fn (callable, optional): Eval function, Default: None
-         """
-
-         assert topk > 0
-         if (topk * 2) > len(dataset):
-             topk = len(dataset) // 2
-
-         if eval_fn is None:
-             eval_fn = bbox_map_eval
-         else:
-             assert callable(eval_fn)
-
-         prog_bar = mmcv.ProgressBar(len(results))
-         _mAPs = {}
-         for i, (result, ) in enumerate(zip(results)):
-             # self.dataset[i] should not call directly
-             # because there is a risk of mismatch
-             data_info = dataset.prepare_train_img(i)
-             mAP = eval_fn(result, data_info['ann_info'])
-             _mAPs[i] = mAP
-             prog_bar.update()
-
-         # descending select topk image
-         _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
-         good_mAPs = _mAPs[-topk:]
-         bad_mAPs = _mAPs[:topk]
-
-         good_dir = osp.abspath(osp.join(show_dir, 'good'))
-         bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
-         self._save_image_gts_results(dataset, results, good_mAPs, good_dir)
-         self._save_image_gts_results(dataset, results, bad_mAPs, bad_dir)
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(
-         description='MMDet eval image prediction result for each')
-     parser.add_argument('config', help='test config file path')
-     parser.add_argument(
-         'prediction_path', help='prediction path where test pkl result')
-     parser.add_argument(
-         'show_dir', help='directory where painted images will be saved')
-     parser.add_argument('--show', action='store_true', help='show results')
-     parser.add_argument(
-         '--wait-time',
-         type=float,
-         default=0,
-         help='the interval of show (s), 0 is block')
-     parser.add_argument(
-         '--topk',
-         default=20,
-         type=int,
-         help='saved Number of the highest topk '
-         'and lowest topk after index sorting')
-     parser.add_argument(
-         '--show-score-thr',
-         type=float,
-         default=0,
-         help='score threshold (default: 0.)')
-     parser.add_argument(
-         '--cfg-options',
-         nargs='+',
-         action=DictAction,
-         help='override some settings in the used config, the key-value pair '
-         'in xxx=yyy format will be merged into config file. If the value to '
-         'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
-         'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
-         'Note that the quotation marks are necessary and that no white space '
-         'is allowed.')
-     args = parser.parse_args()
-     return args
-
-
- def main():
-     args = parse_args()
-
-     mmcv.check_file_exist(args.prediction_path)
-
-     cfg = Config.fromfile(args.config)
-     if args.cfg_options is not None:
-         cfg.merge_from_dict(args.cfg_options)
-     cfg.data.test.test_mode = True
-     # import modules from string list.
-     if cfg.get('custom_imports', None):
-         from mmcv.utils import import_modules_from_strings
-         import_modules_from_strings(**cfg['custom_imports'])
-
-     cfg.data.test.pop('samples_per_gpu', 0)
-     cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline)
-     dataset = build_dataset(cfg.data.test)
-     outputs = mmcv.load(args.prediction_path)
-
-     result_visualizer = ResultVisualizer(args.show, args.wait_time,
-                                          args.show_score_thr)
-     result_visualizer.evaluate_and_show(
-         dataset, outputs, topk=args.topk, show_dir=args.show_dir)
-
-
- if __name__ == '__main__':
-     main()
 
spaces/Andy1621/uniformerv2_demo/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Uniformerv2_demo
- emoji: 📹
- colorFrom: pink
- colorTo: green
- sdk: gradio
- sdk_version: 3.0.3
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Angelaangie/personal-chat-gpt/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Personal Chat Gpt
- emoji: 📉
- colorFrom: purple
- colorTo: blue
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/train.py DELETED
@@ -1,63 +0,0 @@
- import time
- from options.train_options import TrainOptions
- from dataloader.data_loader import dataloader
- from model import create_model
- from util.visualizer import Visualizer
-
-
- if __name__ == '__main__':
-     opt = TrainOptions().parse()  # get training options
-     dataset = dataloader(opt)  # create a dataset
-     dataset_size = len(dataset) * opt.batch_size
-     print('training images = %d' % dataset_size)
-     model = create_model(opt)  # create a model given opt.model and other options
-     visualizer = Visualizer(opt)  # create a visualizer
-
-     total_iters = opt.iter_count  # the total number of training iterations
-     epoch = 0
-     max_iteration = opt.n_iter + opt.n_iter_decay
-
-     while (total_iters < max_iteration):
-         epoch_start_time = time.time()  # timer for entire epoch
-         iter_data_time = time.time()  # timer for data loading per iteration
-         epoch += 1  # the number of training iterations in current epoch, reset to 0 every epoch
-         epoch_iter = 0
-         visualizer.reset()  # reset the visualizer
-
-         for i, data in enumerate(dataset):
-             iter_start_time = time.time()
-             if total_iters % opt.print_freq == 0:
-                 t_data = iter_start_time - iter_data_time
-             if total_iters == 0:
-                 model.setup(opt)
-                 model.parallelize()
-             total_iters += opt.batch_size
-             epoch_iter += opt.batch_size
-
-             model.set_input(data)  # unpack data from dataset and apply preprocessing
-             model.optimize_parameters()
-
-             if total_iters % opt.display_freq == 0:  # display images on visdom and save images to a HTML file
-                 save_result = total_iters % opt.update_html_freq == 0
-                 model.log_imgs()
-                 visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
-
-             if total_iters % opt.print_freq == 0:  # print training losses and save logging information to the disk
-                 losses = model.get_current_losses()
-                 t_comp = (time.time() - iter_start_time) / opt.batch_size
-                 visualizer.print_current_losses(epoch, total_iters, losses, t_comp, t_data)
-                 if opt.display_id is None or opt.display_id > 0:
-                     visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
-
-             if total_iters % opt.save_latest_freq == 0:  # cache our latest model every <save_latest_freq> iterations
-                 print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
-                 print(opt.name)  # it's useful to occasionally show the experiment name on console
-                 model.save_networks('latest')
-
-             if total_iters % opt.save_iters_freq == 0:  # cache our model every <save_epoch_freq> epochs
-                 print('saving the model at the end of iters %d' % (total_iters))
-                 model.save_networks('latest')
-                 model.save_networks(total_iters)
-
-         print('End of iters %d / %d \t Time Taken: %d sec' % (total_iters, max_iteration, time.time() - epoch_start_time))
-         model.update_learning_rate()
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/upsample.py DELETED
@@ -1,84 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import torch.nn as nn
- import torch.nn.functional as F
-
- from ..utils import xavier_init
- from .registry import UPSAMPLE_LAYERS
-
- UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
- UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
-
-
- @UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
- class PixelShufflePack(nn.Module):
-     """Pixel Shuffle upsample layer.
-
-     This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
-     achieve a simple upsampling with pixel shuffle.
-
-     Args:
-         in_channels (int): Number of input channels.
-         out_channels (int): Number of output channels.
-         scale_factor (int): Upsample ratio.
-         upsample_kernel (int): Kernel size of the conv layer to expand the
-             channels.
-     """
-
-     def __init__(self, in_channels, out_channels, scale_factor,
-                  upsample_kernel):
-         super(PixelShufflePack, self).__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.scale_factor = scale_factor
-         self.upsample_kernel = upsample_kernel
-         self.upsample_conv = nn.Conv2d(
-             self.in_channels,
-             self.out_channels * scale_factor * scale_factor,
-             self.upsample_kernel,
-             padding=(self.upsample_kernel - 1) // 2)
-         self.init_weights()
-
-     def init_weights(self):
-         xavier_init(self.upsample_conv, distribution='uniform')
-
-     def forward(self, x):
-         x = self.upsample_conv(x)
-         x = F.pixel_shuffle(x, self.scale_factor)
-         return x
-
-
- def build_upsample_layer(cfg, *args, **kwargs):
-     """Build upsample layer.
-
-     Args:
-         cfg (dict): The upsample layer config, which should contain:
-
-             - type (str): Layer type.
-             - scale_factor (int): Upsample ratio, which is not applicable to
-               deconv.
-             - layer args: Args needed to instantiate a upsample layer.
-         args (argument list): Arguments passed to the ``__init__``
-             method of the corresponding conv layer.
-         kwargs (keyword arguments): Keyword arguments passed to the
-             ``__init__`` method of the corresponding conv layer.
-
-     Returns:
-         nn.Module: Created upsample layer.
-     """
-     if not isinstance(cfg, dict):
-         raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
-     if 'type' not in cfg:
-         raise KeyError(
-             f'the cfg dict must contain the key "type", but got {cfg}')
-     cfg_ = cfg.copy()
-
-     layer_type = cfg_.pop('type')
-     if layer_type not in UPSAMPLE_LAYERS:
-         raise KeyError(f'Unrecognized upsample type {layer_type}')
-     else:
-         upsample = UPSAMPLE_LAYERS.get(layer_type)
-
-     if upsample is nn.Upsample:
-         cfg_['mode'] = layer_type
-     layer = upsample(*args, **kwargs, **cfg_)
-     return layer
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py DELETED
@@ -1,82 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- from ...dist_utils import master_only
- from ..hook import HOOKS
- from .base import LoggerHook
-
-
- @HOOKS.register_module()
- class NeptuneLoggerHook(LoggerHook):
-     """Class to log metrics to NeptuneAI.
-
-     It requires `neptune-client` to be installed.
-
-     Args:
-         init_kwargs (dict): a dict contains the initialization keys as below:
-             - project (str): Name of a project in a form of
-               namespace/project_name. If None, the value of
-               NEPTUNE_PROJECT environment variable will be taken.
-             - api_token (str): User’s API token.
-               If None, the value of NEPTUNE_API_TOKEN environment
-               variable will be taken. Note: It is strongly recommended
-               to use NEPTUNE_API_TOKEN environment variable rather than
-               placing your API token in plain text in your source code.
-             - name (str, optional, default is 'Untitled'): Editable name of
-               the run. Name is displayed in the run's Details and in
-               Runs table as a column.
-             Check https://docs.neptune.ai/api-reference/neptune#init for
-             more init arguments.
-         interval (int): Logging interval (every k iterations).
-         ignore_last (bool): Ignore the log of last iterations in each epoch
-             if less than `interval`.
-         reset_flag (bool): Whether to clear the output buffer after logging
-         by_epoch (bool): Whether EpochBasedRunner is used.
-
-     .. _NeptuneAI:
-         https://docs.neptune.ai/you-should-know/logging-metadata
-     """
-
-     def __init__(self,
-                  init_kwargs=None,
-                  interval=10,
-                  ignore_last=True,
-                  reset_flag=True,
-                  with_step=True,
-                  by_epoch=True):
-
-         super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
-                                                 reset_flag, by_epoch)
-         self.import_neptune()
-         self.init_kwargs = init_kwargs
-         self.with_step = with_step
-
-     def import_neptune(self):
-         try:
-             import neptune.new as neptune
-         except ImportError:
-             raise ImportError(
-                 'Please run "pip install neptune-client" to install neptune')
-         self.neptune = neptune
-         self.run = None
-
-     @master_only
-     def before_run(self, runner):
-         if self.init_kwargs:
-             self.run = self.neptune.init(**self.init_kwargs)
-         else:
-             self.run = self.neptune.init()
-
-     @master_only
-     def log(self, runner):
-         tags = self.get_loggable_tags(runner)
-         if tags:
-             for tag_name, tag_value in tags.items():
-                 if self.with_step:
-                     self.run[tag_name].log(
-                         tag_value, step=self.get_iter(runner))
-                 else:
-                     tags['global_step'] = self.get_iter(runner)
-                     self.run[tag_name].log(tags)
-
-     @master_only
-     def after_run(self, runner):
-         self.run.stop()
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/install_scripts.py DELETED
@@ -1,61 +0,0 @@
- """distutils.command.install_scripts
-
- Implements the Distutils 'install_scripts' command, for installing
- Python scripts."""
-
- # contributed by Bastian Kleineidam
-
- import os
- from distutils.core import Command
- from distutils import log
- from stat import ST_MODE
-
-
- class install_scripts(Command):
-
-     description = "install scripts (Python or otherwise)"
-
-     user_options = [
-         ('install-dir=', 'd', "directory to install scripts to"),
-         ('build-dir=', 'b', "build directory (where to install from)"),
-         ('force', 'f', "force installation (overwrite existing files)"),
-         ('skip-build', None, "skip the build steps"),
-     ]
-
-     boolean_options = ['force', 'skip-build']
-
-     def initialize_options(self):
-         self.install_dir = None
-         self.force = 0
-         self.build_dir = None
-         self.skip_build = None
-
-     def finalize_options(self):
-         self.set_undefined_options('build', ('build_scripts', 'build_dir'))
-         self.set_undefined_options(
-             'install',
-             ('install_scripts', 'install_dir'),
-             ('force', 'force'),
-             ('skip_build', 'skip_build'),
-         )
-
-     def run(self):
-         if not self.skip_build:
-             self.run_command('build_scripts')
-         self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
-         if os.name == 'posix':
-             # Set the executable bits (owner, group, and world) on
-             # all the scripts we just installed.
-             for file in self.get_outputs():
-                 if self.dry_run:
-                     log.info("changing mode of %s", file)
-                 else:
-                     mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
-                     log.info("changing mode of %s to %o", file, mode)
-                     os.chmod(file, mode)
-
-     def get_inputs(self):
-         return self.distribution.scripts or []
-
-     def get_outputs(self):
-         return self.outfiles or []
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/text_file.py DELETED
@@ -1,287 +0,0 @@
- """text_file
-
- provides the TextFile class, which gives an interface to text files
- that (optionally) takes care of stripping comments, ignoring blank
- lines, and joining lines with backslashes."""
-
- import sys
-
-
- class TextFile:
-     """Provides a file-like object that takes care of all the things you
-     commonly want to do when processing a text file that has some
-     line-by-line syntax: strip comments (as long as "#" is your
-     comment character), skip blank lines, join adjacent lines by
-     escaping the newline (ie. backslash at end of line), strip
-     leading and/or trailing whitespace. All of these are optional
-     and independently controllable.
-
-     Provides a 'warn()' method so you can generate warning messages that
-     report physical line number, even if the logical line in question
-     spans multiple physical lines. Also provides 'unreadline()' for
-     implementing line-at-a-time lookahead.
-
-     Constructor is called as:
-
-         TextFile (filename=None, file=None, **options)
-
-     It bombs (RuntimeError) if both 'filename' and 'file' are None;
-     'filename' should be a string, and 'file' a file object (or
-     something that provides 'readline()' and 'close()' methods). It is
-     recommended that you supply at least 'filename', so that TextFile
-     can include it in warning messages. If 'file' is not supplied,
-     TextFile creates its own using 'io.open()'.
-
-     The options are all boolean, and affect the value returned by
-     'readline()':
-         strip_comments [default: true]
-             strip from "#" to end-of-line, as well as any whitespace
-             leading up to the "#" -- unless it is escaped by a backslash
-         lstrip_ws [default: false]
-             strip leading whitespace from each line before returning it
-         rstrip_ws [default: true]
-             strip trailing whitespace (including line terminator!) from
-             each line before returning it
-         skip_blanks [default: true}
-             skip lines that are empty *after* stripping comments and
-             whitespace. (If both lstrip_ws and rstrip_ws are false,
-             then some lines may consist of solely whitespace: these will
-             *not* be skipped, even if 'skip_blanks' is true.)
-         join_lines [default: false]
-             if a backslash is the last non-newline character on a line
-             after stripping comments and whitespace, join the following line
-             to it to form one "logical line"; if N consecutive lines end
-             with a backslash, then N+1 physical lines will be joined to
-             form one logical line.
-         collapse_join [default: false]
-             strip leading whitespace from lines that are joined to their
-             predecessor; only matters if (join_lines and not lstrip_ws)
-         errors [default: 'strict']
-             error handler used to decode the file content
-
-     Note that since 'rstrip_ws' can strip the trailing newline, the
-     semantics of 'readline()' must differ from those of the builtin file
-     object's 'readline()' method! In particular, 'readline()' returns
-     None for end-of-file: an empty string might just be a blank line (or
-     an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
-     not."""
-
-     default_options = {
-         'strip_comments': 1,
-         'skip_blanks': 1,
-         'lstrip_ws': 0,
-         'rstrip_ws': 1,
-         'join_lines': 0,
-         'collapse_join': 0,
-         'errors': 'strict',
-     }
-
-     def __init__(self, filename=None, file=None, **options):
-         """Construct a new TextFile object. At least one of 'filename'
-         (a string) and 'file' (a file-like object) must be supplied.
-         They keyword argument options are described above and affect
-         the values returned by 'readline()'."""
-         if filename is None and file is None:
-             raise RuntimeError(
-                 "you must supply either or both of 'filename' and 'file'"
-             )
-
-         # set values for all options -- either from client option hash
-         # or fallback to default_options
-         for opt in self.default_options.keys():
-             if opt in options:
-                 setattr(self, opt, options[opt])
-             else:
-                 setattr(self, opt, self.default_options[opt])
-
-         # sanity check client option hash
-         for opt in options.keys():
-             if opt not in self.default_options:
-                 raise KeyError("invalid TextFile option '%s'" % opt)
-
-         if file is None:
-             self.open(filename)
-         else:
-             self.filename = filename
-             self.file = file
-             self.current_line = 0  # assuming that file is at BOF!
-
-         # 'linebuf' is a stack of lines that will be emptied before we
-         # actually read from the file; it's only populated by an
-         # 'unreadline()' operation
-         self.linebuf = []
-
-     def open(self, filename):
-         """Open a new file named 'filename'. This overrides both the
-         'filename' and 'file' arguments to the constructor."""
-         self.filename = filename
-         self.file = open(self.filename, errors=self.errors)
-         self.current_line = 0
-
-     def close(self):
-         """Close the current file and forget everything we know about it
-         (filename, current line number)."""
-         file = self.file
-         self.file = None
-         self.filename = None
-         self.current_line = None
-         file.close()
-
-     def gen_error(self, msg, line=None):
-         outmsg = []
-         if line is None:
-             line = self.current_line
-         outmsg.append(self.filename + ", ")
-         if isinstance(line, (list, tuple)):
-             outmsg.append("lines %d-%d: " % tuple(line))
-         else:
-             outmsg.append("line %d: " % line)
-         outmsg.append(str(msg))
-         return "".join(outmsg)
-
-     def error(self, msg, line=None):
-         raise ValueError("error: " + self.gen_error(msg, line))
-
-     def warn(self, msg, line=None):
-         """Print (to stderr) a warning message tied to the current logical
-         line in the current file. If the current logical line in the
-         file spans multiple physical lines, the warning refers to the
-         whole range, eg. "lines 3-5". If 'line' supplied, it overrides
-         the current line number; it may be a list or tuple to indicate a
-         range of physical lines, or an integer for a single physical
-         line."""
-         sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
-
-     def readline(self):  # noqa: C901
-         """Read and return a single logical line from the current file (or
-         from an internal buffer if lines have previously been "unread"
-         with 'unreadline()'). If the 'join_lines' option is true, this
-         may involve reading multiple physical lines concatenated into a
-         single string. Updates the current line number, so calling
-         'warn()' after 'readline()' emits a warning about the physical
-         line(s) just read. Returns None on end-of-file, since the empty
-         string can occur if 'rstrip_ws' is true but 'strip_blanks' is
-         not."""
-         # If any "unread" lines waiting in 'linebuf', return the top
-         # one. (We don't actually buffer read-ahead data -- lines only
-         # get put in 'linebuf' if the client explicitly does an
-         # 'unreadline()'.
-         if self.linebuf:
-             line = self.linebuf[-1]
-             del self.linebuf[-1]
-             return line
-
-         buildup_line = ''
-
-         while True:
-             # read the line, make it None if EOF
-             line = self.file.readline()
-             if line == '':
-                 line = None
-
-             if self.strip_comments and line:
-
-                 # Look for the first "#" in the line. If none, never
-                 # mind. If we find one and it's the first character, or
-                 # is not preceded by "\", then it starts a comment --
-                 # strip the comment, strip whitespace before it, and
-                 # carry on. Otherwise, it's just an escaped "#", so
-                 # unescape it (and any other escaped "#"'s that might be
-                 # lurking in there) and otherwise leave the line alone.
-
-                 pos = line.find("#")
-                 if pos == -1:  # no "#" -- no comments
-                     pass
-
-                 # It's definitely a comment -- either "#" is the first
-                 # character, or it's elsewhere and unescaped.
-                 elif pos == 0 or line[pos - 1] != "\\":
-                     # Have to preserve the trailing newline, because it's
-                     # the job of a later step (rstrip_ws) to remove it --
-                     # and if rstrip_ws is false, we'd better preserve it!
-                     # (NB. this means that if the final line is all comment
-                     # and has no trailing newline, we will think that it's
-                     # EOF; I think that's OK.)
-                     eol = (line[-1] == '\n') and '\n' or ''
-                     line = line[0:pos] + eol
-
-                     # If all that's left is whitespace, then skip line
-                     # *now*, before we try to join it to 'buildup_line' --
-                     # that way constructs like
-                     #   hello \\
-                     #   # comment that should be ignored
-                     #   there
-                     # result in "hello there".
-                     if line.strip() == "":
-                         continue
-                 else:  # it's an escaped "#"
-                     line = line.replace("\\#", "#")
-
-             # did previous line end with a backslash? then accumulate
-             if self.join_lines and buildup_line:
-                 # oops: end of file
-                 if line is None:
-                     self.warn("continuation line immediately precedes " "end-of-file")
-                     return buildup_line
-
-                 if self.collapse_join:
-                     line = line.lstrip()
-                 line = buildup_line + line
-
-                 # careful: pay attention to line number when incrementing it
-                 if isinstance(self.current_line, list):
-                     self.current_line[1] = self.current_line[1] + 1
-                 else:
-                     self.current_line = [self.current_line, self.current_line + 1]
-             # just an ordinary line, read it as usual
-             else:
-                 if line is None:  # eof
-                     return None
-
-                 # still have to be careful about incrementing the line number!
-                 if isinstance(self.current_line, list):
-                     self.current_line = self.current_line[1] + 1
-                 else:
-                     self.current_line = self.current_line + 1
-
-             # strip whitespace however the client wants (leading and
-             # trailing, or one or the other, or neither)
-             if self.lstrip_ws and self.rstrip_ws:
-                 line = line.strip()
-             elif self.lstrip_ws:
-                 line = line.lstrip()
-             elif self.rstrip_ws:
-                 line = line.rstrip()
-
-             # blank line (whether we rstrip'ed or not)? skip to next line
-             # if appropriate
-             if (line == '' or line == '\n') and self.skip_blanks:
-                 continue
-
-             if self.join_lines:
-                 if line[-1] == '\\':
-                     buildup_line = line[:-1]
-                     continue
-
-                 if line[-2:] == '\\\n':
-                     buildup_line = line[0:-2] + '\n'
-                     continue
-
-             # well, I guess there's some actual content there: return it
-             return line
-
-     def readlines(self):
-         """Read and return the list of all logical lines remaining in the
-         current file."""
-         lines = []
-         while True:
-             line = self.readline()
-             if line is None:
-                 return lines
-             lines.append(line)
-
-     def unreadline(self, line):
-         """Push 'line' (a string) onto an internal buffer that will be
-         checked by future 'readline()' calls. Handy for implementing
-         a parser with line-at-a-time lookahead."""
-         self.linebuf.append(line)
 
spaces/Awesimo/jojogan/e4e/models/encoders/helpers.py DELETED
@@ -1,140 +0,0 @@
- from collections import namedtuple
- import torch
- import torch.nn.functional as F
- from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
-
- """
- ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
- """
-
-
- class Flatten(Module):
-     def forward(self, input):
-         return input.view(input.size(0), -1)
-
-
- def l2_norm(input, axis=1):
-     norm = torch.norm(input, 2, axis, True)
-     output = torch.div(input, norm)
-     return output
-
-
- class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
-     """ A named tuple describing a ResNet block. """
-
-
- def get_block(in_channel, depth, num_units, stride=2):
-     return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
-
-
- def get_blocks(num_layers):
-     if num_layers == 50:
-         blocks = [
-             get_block(in_channel=64, depth=64, num_units=3),
-             get_block(in_channel=64, depth=128, num_units=4),
-             get_block(in_channel=128, depth=256, num_units=14),
-             get_block(in_channel=256, depth=512, num_units=3)
-         ]
-     elif num_layers == 100:
-         blocks = [
-             get_block(in_channel=64, depth=64, num_units=3),
-             get_block(in_channel=64, depth=128, num_units=13),
-             get_block(in_channel=128, depth=256, num_units=30),
-             get_block(in_channel=256, depth=512, num_units=3)
-         ]
-     elif num_layers == 152:
-         blocks = [
-             get_block(in_channel=64, depth=64, num_units=3),
-             get_block(in_channel=64, depth=128, num_units=8),
-             get_block(in_channel=128, depth=256, num_units=36),
-             get_block(in_channel=256, depth=512, num_units=3)
-         ]
-     else:
-         raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
-     return blocks
-
-
- class SEModule(Module):
-     def __init__(self, channels, reduction):
-         super(SEModule, self).__init__()
-         self.avg_pool = AdaptiveAvgPool2d(1)
-         self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
-         self.relu = ReLU(inplace=True)
-         self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
-         self.sigmoid = Sigmoid()
-
-     def forward(self, x):
-         module_input = x
-         x = self.avg_pool(x)
-         x = self.fc1(x)
-         x = self.relu(x)
-         x = self.fc2(x)
-         x = self.sigmoid(x)
-         return module_input * x
-
-
- class bottleneck_IR(Module):
-     def __init__(self, in_channel, depth, stride):
-         super(bottleneck_IR, self).__init__()
-         if in_channel == depth:
-             self.shortcut_layer = MaxPool2d(1, stride)
-         else:
-             self.shortcut_layer = Sequential(
-                 Conv2d(in_channel, depth, (1, 1), stride, bias=False),
-                 BatchNorm2d(depth)
-             )
-         self.res_layer = Sequential(
-             BatchNorm2d(in_channel),
-             Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
-             Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
-         )
-
-     def forward(self, x):
-         shortcut = self.shortcut_layer(x)
-         res = self.res_layer(x)
-         return res + shortcut
-
-
- class bottleneck_IR_SE(Module):
-     def __init__(self, in_channel, depth, stride):
-         super(bottleneck_IR_SE, self).__init__()
-         if in_channel == depth:
-             self.shortcut_layer = MaxPool2d(1, stride)
-         else:
-             self.shortcut_layer = Sequential(
-                 Conv2d(in_channel, depth, (1, 1), stride, bias=False),
-                 BatchNorm2d(depth)
-             )
-         self.res_layer = Sequential(
-             BatchNorm2d(in_channel),
-             Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
-             PReLU(depth),
-             Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
-             BatchNorm2d(depth),
-             SEModule(depth, 16)
-         )
-
-     def forward(self, x):
-         shortcut = self.shortcut_layer(x)
-         res = self.res_layer(x)
-         return res + shortcut
-
-
- def _upsample_add(x, y):
-     """Upsample and add two feature maps.
-     Args:
-         x: (Variable) top feature map to be upsampled.
-         y: (Variable) lateral feature map.
-     Returns:
-         (Variable) added feature map.
-     Note in PyTorch, when input size is odd, the upsampled feature map
-     with `F.upsample(..., scale_factor=2, mode='nearest')`
-     maybe not equal to the lateral feature map size.
-     e.g.
-     original input size: [N,_,15,15] ->
-     conv2d feature map size: [N,_,8,8] ->
-     upsampled feature map size: [N,_,16,16]
-     So we choose bilinear upsample which supports arbitrary output sizes.
-     """
-     _, _, H, W = y.size()
-     return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
 
spaces/BIASLab/sars-cov-2-classification-fcgr/src/cgr.py DELETED
@@ -1,77 +0,0 @@
- "From original work: CGR for gene structure"
- from typing import Dict, Optional
- from collections import namedtuple
-
- # coordinates for x+iy
- Coord = namedtuple("Coord", ["x","y"])
-
- # coordinates for a CGR encoding
- CGRCoords = namedtuple("CGRCoords", ["N","x","y"])
-
- # coordinates for each nucleotide in the 2d-plane
- DEFAULT_COORDS = dict(A=Coord(1,1),C=Coord(-1,1),G=Coord(-1,-1),T=Coord(1,-1))
-
- class CGR:
-     "Chaos Game Representation for DNA"
-     def __init__(self, coords: Optional[Dict[chr,tuple]]=None):
-         self.nucleotide_coords = DEFAULT_COORDS if coords is None else coords
-         self.cgr_coords = CGRCoords(0,0,0)
-
-     def nucleotide_by_coords(self,x,y):
-         "Get nucleotide by coordinates (x,y)"
-         # filter nucleotide by coordinates
-         filtered = dict(filter(lambda item: item[1] == Coord(x,y), self.nucleotide_coords.items()))
-
-         return list(filtered.keys())[0]
-
-     def forward(self, nucleotide: str):
-         "Compute next CGR coordinates"
-         x = (self.cgr_coords.x + self.nucleotide_coords.get(nucleotide).x)/2
-         y = (self.cgr_coords.y + self.nucleotide_coords.get(nucleotide).y)/2
-
-         # update cgr_coords
-         self.cgr_coords = CGRCoords(self.cgr_coords.N+1,x,y)
-
-     def backward(self,):
-         "Compute last CGR coordinates. Current nucleotide can be inferred from (x,y)"
-         # get current nucleotide based on coordinates
-         n_x,n_y = self.coords_current_nucleotide()
-         nucleotide = self.nucleotide_by_coords(n_x,n_y)
-
-         # update coordinates to the previous one
-         x = 2*self.cgr_coords.x - n_x
-         y = 2*self.cgr_coords.y - n_y
-
-         # update cgr_coords
-         self.cgr_coords = CGRCoords(self.cgr_coords.N-1,x,y)
-
-         return nucleotide
-
-     def coords_current_nucleotide(self,):
-         x = 1 if self.cgr_coords.x>0 else -1
-         y = 1 if self.cgr_coords.y>0 else -1
-         return x,y
-
-     def encode(self, sequence: str):
-         "From DNA sequence to CGR"
-         # reset starting position to (0,0,0)
-         self.reset_coords()
-         for nucleotide in sequence:
-             self.forward(nucleotide)
-         return self.cgr_coords
-
-     def reset_coords(self,):
-         self.cgr_coords = CGRCoords(0,0,0)
-
-     def decode(self, N:int, x:int, y:int)->str:
-         "From CGR to DNA sequence"
-         self.cgr_coords = CGRCoords(N,x,y)
-
-         # decoded sequence
-         sequence = []
-
-         # Recover the entire genome
-         while self.cgr_coords.N>0:
-             nucleotide = self.backward()
-             sequence.append(nucleotide)
-         return "".join(sequence[::-1])
 
spaces/Banbri/zcvzcv/src/app/interface/display/index.tsx DELETED
@@ -1,12 +0,0 @@
- import { RenderedScene } from "@/types"
-
- export function Display ({ rendered }: { rendered: RenderedScene }) {
-   return (
-     <>
-       <img
-         src={rendered.assetUrl || undefined}
-         className="fixed w-screen top-0 left-0 right-0"
-       />
-     </>
-   )
- }
 
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_123812KB.py DELETED
@@ -1,122 +0,0 @@
- import torch
- from torch import nn
- import torch.nn.functional as F
-
- from . import layers_123821KB as layers
-
-
- class BaseASPPNet(nn.Module):
-     def __init__(self, nin, ch, dilations=(4, 8, 16)):
-         super(BaseASPPNet, self).__init__()
-         self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-         self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-         self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-         self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-         self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-         self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-         self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-         self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-         self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-     def __call__(self, x):
-         h, e1 = self.enc1(x)
-         h, e2 = self.enc2(h)
-         h, e3 = self.enc3(h)
-         h, e4 = self.enc4(h)
-
-         h = self.aspp(h)
-
-         h = self.dec4(h, e4)
-         h = self.dec3(h, e3)
-         h = self.dec2(h, e2)
-         h = self.dec1(h, e1)
-
-         return h
-
-
- class CascadedASPPNet(nn.Module):
-     def __init__(self, n_fft):
-         super(CascadedASPPNet, self).__init__()
-         self.stg1_low_band_net = BaseASPPNet(2, 32)
-         self.stg1_high_band_net = BaseASPPNet(2, 32)
-
-         self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
-         self.stg2_full_band_net = BaseASPPNet(16, 32)
-
-         self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-         self.stg3_full_band_net = BaseASPPNet(32, 64)
-
-         self.out = nn.Conv2d(64, 2, 1, bias=False)
-         self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
-         self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
-         self.max_bin = n_fft // 2
-         self.output_bin = n_fft // 2 + 1
-
-         self.offset = 128
-
-     def forward(self, x, aggressiveness=None):
-         mix = x.detach()
-         x = x.clone()
-
-         x = x[:, :, : self.max_bin]
-
-         bandw = x.size()[2] // 2
-         aux1 = torch.cat(
-             [
-                 self.stg1_low_band_net(x[:, :, :bandw]),
-                 self.stg1_high_band_net(x[:, :, bandw:]),
-             ],
-             dim=2,
-         )
-
-         h = torch.cat([x, aux1], dim=1)
-         aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-         h = torch.cat([x, aux1, aux2], dim=1)
-         h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-         mask = torch.sigmoid(self.out(h))
-         mask = F.pad(
-             input=mask,
-             pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-             mode="replicate",
-         )
-
-         if self.training:
-             aux1 = torch.sigmoid(self.aux1_out(aux1))
-             aux1 = F.pad(
-                 input=aux1,
-                 pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                 mode="replicate",
-             )
-             aux2 = torch.sigmoid(self.aux2_out(aux2))
-             aux2 = F.pad(
-                 input=aux2,
-                 pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                 mode="replicate",
-             )
-             return mask * mix, aux1 * mix, aux2 * mix
-         else:
-             if aggressiveness:
-                 mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                     mask[:, :, : aggressiveness["split_bin"]],
-                     1 + aggressiveness["value"] / 3,
-                 )
-                 mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                     mask[:, :, aggressiveness["split_bin"] :],
-                     1 + aggressiveness["value"],
-                 )
-
-             return mask * mix
-
-     def predict(self, x_mag, aggressiveness=None):
-         h = self.forward(x_mag, aggressiveness)
-
-         if self.offset > 0:
-             h = h[:, :, :, self.offset : -self.offset]
-             assert h.size()[3] > 0
-
-         return h
 
spaces/Bazedgul/YoutubeVideo-Transcript-Summarization/app.py DELETED
@@ -1,23 +0,0 @@
- import gradio as gr
- import transformers
- import youtube_transcript_api
- from transformers import pipeline
- from youtube_transcript_api import YouTubeTranscriptApi
- from datasets import Dataset
-
- summarizer = pipeline("summarization",model="facebook/bart-large-cnn")
-
- def greet(link):
-     try:
-         unique_id = link.split("=")[-1]
-         sub = YouTubeTranscriptApi.get_transcript(unique_id)
-         subtitle = " ".join([w['text'] for w in sub])
-         summary = summarizer(subtitle, max_length=180, min_length=30, do_sample=False)
-         return summary[0]['summary_text']
-     except:
-         return 'Invalid URL'
-
- demo=gr.Interface(fn=greet, inputs="text", outputs="text")
-
- if __name__ == "__main__":
-     demo.launch()
 
spaces/Benson/text-generation/Examples/Aplicacin Descargar Tirador De Burbujas.md DELETED
@@ -1,94 +0,0 @@
1
- <br />
2
- <h1>App Download Bubble Shooter: Cómo jugar y disfrutar de este divertido juego</h1>
3
- <p>Si usted está buscando un juego de puzzle divertido y adictivo que puede mantenerlo entretenido durante horas, usted debe probar juegos de disparos de burbujas. Los juegos de disparos de burbujas son juegos simples pero desafiantes que requieren que coincidas y hagas estallar burbujas coloridas en la pantalla. En este artículo, te contaremos todo lo que necesitas saber sobre los juegos de disparos de burbujas, cómo descargarlos en tu dispositivo, cómo jugarlos y ganar niveles, y por qué deberías jugarlos. </p>
4
- <h2>¿Qué es Bubble Shooter? </h2>
5
- <p>Bubble shooter es un tipo de juego de puzzle que consiste en disparar burbujas desde un cañón o un lanzador en la parte inferior de la pantalla. El objetivo es hacer coincidir tres o más burbujas del mismo color para hacerlas estallar y limpiar el tablero. El juego termina cuando se borran todas las burbujas o cuando las burbujas llegan a la parte inferior de la pantalla. </p>
6
- <h2>aplicación descargar tirador de burbujas</h2><br /><p><b><b>Download</b> &#10027; <a href="https://bltlly.com/2v6LZ8">https://bltlly.com/2v6LZ8</a></b></p><br /><br />
7
- <h3>La historia y popularidad del juego</h3>
8
- <p>Los juegos de disparos de burbujas tienen una larga historia que se remonta a la década de 1980. El primer juego de disparos de burbujas se llamó Puzzle Bobble, que fue lanzado por Taito en 1986. Fue un spin-off del popular juego de árcade Bubble Bobble, que contó con dos lindos dragones llamados Bub y Bob. Puzzle Bobble se convirtió en un gran éxito en Japón y más tarde en otros países, generando muchas secuelas y clones. </p>
9
- <p>Desde entonces, los juegos de disparos de burbujas han evolucionado y mejorado, añadiendo nuevas características, gráficos, sonidos y elementos de juego. Hoy en día, los juegos de disparos de burbujas son uno de los géneros más populares de los juegos casuales, con millones de jugadores en todo el mundo. Puedes encontrar cientos de juegos de disparos de burbujas en varias plataformas, como navegadores web, dispositivos móviles, consolas y computadoras. </p>
10
- <h3>La jugabilidad y características del juego</h3>
11
-
12
- <p>Los juegos de disparos de burbujas también tienen varias características que los hacen más divertidos y emocionantes. Por ejemplo, algunos juegos tienen diferentes niveles con diferentes diseños, obstáculos, metas y dificultades. Algunos juegos tienen diferentes modos de juego, como el modo puzzle, el modo árcade, el modo clásico, etc. Algunos juegos tienen potenciadores y potenciadores que pueden ayudarte a limpiar el tablero más rápido o superar desafíos. Algunos juegos tienen tablas de clasificación, logros, recompensas, bonos diarios, etc.</p>
13
- <h2>Cómo descargar Bubble Shooter en su dispositivo? </h2>
14
- <p>Si quieres jugar juegos de disparos de burbujas en tu dispositivo, necesitas descargarlos desde la tienda de aplicaciones o el navegador web. Aquí hay algunos pasos sobre cómo descargar juegos de disparos de burbujas en su dispositivo:</p>
15
- <h3>Para usuarios de Android</h3>
16
- <ul>
17
- <li>Ir a Google Play Store en su dispositivo. </li>
18
- <li> Buscar "tirador de burbujas" o cualquier juego de disparos de burbujas específico que desea jugar. </li>
19
- <li>Seleccione el juego que desea descargar y toque en "Instalar". </li>
20
- <li>Espere a que finalice el proceso de descarga e instalación. </li>
21
- <li>¡Abre el juego y disfruta! </li>
22
- </ul>
23
- <h3>Para usuarios de iOS</h3>
24
- <ul>
25
- <li>Ir a App Store en su dispositivo. </li>
26
- <li> Buscar "tirador de burbujas" o cualquier juego de disparos de burbujas específico que desea jugar. </li>
27
- <li>Seleccione el juego que desea descargar y toque en "Obtener". </li>
28
- <li>Introduzca su ID de Apple y contraseña si se le solicita. </li>
29
- <li>Espere a que finalice el proceso de descarga e instalación. </li>
30
- <li>¡Abre el juego y disfruta! </li>
31
- </ul>
32
- <h2>Cómo jugar Bubble Shooter y ganar niveles? </h2>
33
- <p>Ahora que has descargado juegos de disparos de burbujas en tu dispositivo, estás listo para jugar y divertirte. Pero, ¿cómo se juega juegos de burbujas tirador y ganar niveles? Aquí hay algunos consejos y trucos sobre cómo jugar juegos de burbujas tirador y ganar niveles:</p>
34
- <h3>Las reglas y consejos básicos</h3>
35
- <ul>
36
- <li>La regla básica de los juegos de disparos de burbujas es hacer coincidir tres o más burbujas del mismo color para hacerlas estallar y limpiar el tablero. </li>
37
-
38
- <li>Puedes ver la siguiente burbuja en tu lanzador y planificar tus movimientos en consecuencia. También puede intercambiar la burbuja actual con la siguiente pulsando o haciendo clic en el lanzador. </li>
39
- <li>Deberías intentar hacer estallar tantas burbujas como sea posible con cada toma, ya que esto te dará más puntos y despejará el tablero más rápido. </li>
40
- <li> También debe tratar de hacer estallar las burbujas que están sosteniendo otras burbujas, ya que esto hará que caigan y estallen también, creando una reacción en cadena. </li>
41
- <li> Debes evitar disparar burbujas que no coincidan con ningún color en el tablero, ya que esto agregará más burbujas y hará que el tablero esté más lleno. </li>
42
- <li> También debe prestar atención a la línea de fondo del tablero, ya que esto indica lo cerca que están las burbujas para llegar a la parte inferior de la pantalla. Si las burbujas tocan la línea de fondo, perderás el juego. </li>
43
- </ul>
44
- <h3>Los diferentes modos de juego y desafíos</h3>
45
- <p>Los juegos de disparos de burbujas tienen diferentes modos de juego y desafíos que pueden hacer que el juego sea más interesante y desafiante. Algunos de los modos de juego y desafíos comunes son:</p>
46
- <ul>
47
- <li>Modo de rompecabezas: En este modo, usted tiene un número limitado de burbujas para disparar y un objetivo específico para lograr, como limpiar un cierto número de burbujas, hacer estallar un cierto color de burbujas, liberar a los animales atrapados, etc. Necesitas usar tus habilidades de estrategia y lógica para completar cada nivel. </li>
48
- <li>Modo árcade: En este modo, tienes burbujas ilimitadas para disparar, pero el tablero sigue bajando con cada disparo. Necesitas ser rápido y preciso para limpiar el tablero antes de que llegue a la parte inferior de la pantalla. </li>
49
- <li>Modo clásico: En este modo, tienes un juego clásico de disparos de burbujas sin características especiales ni objetivos. Solo tienes que borrar todas las burbujas en el tablero y anotar tantos puntos como sea posible. </li>
50
-
51
- </ul>
52
- <h3>Los potenciadores y refuerzos</h3>
53
- <p>Los juegos de disparos de burbujas también tienen potenciadores y refuerzos que pueden ayudarte a limpiar el tablero más rápido o superar desafíos. Algunos de los potenciadores y refuerzos comunes son:</p>
54
- <ul>
55
- <li>Bomba: Este potenciador puede explotar y estallar todas las burbujas en un gran radio a su alrededor. Puedes usarlo para limpiar una gran área de burbujas o romper obstáculos. </li>
56
- <li>Bola de fuego: Este potenciador puede quemar y hacer estallar todas las burbujas en una línea recta. Puede usarlo para limpiar una larga fila de burbujas o alcanzar puntos difíciles de conseguir. </li>
57
- <li>Arco iris: Este potenciador puede cambiar su color para que coincida con cualquier burbuja en el tablero. Puede usarlo para crear coincidencias con cualquier color de burbujas o completar objetivos de un color específico. </li>
58
- <li>Cambiador de color: Este potenciador puede cambiar el color de todas las burbujas en el tablero a un solo color. Puede usarlo para borrar todas las burbujas en el tablero o crear reacciones en cadena masivas. </li>
59
- <li>Burbujas adicionales: Este refuerzo puede darle burbujas adicionales para disparar. Puede usarlo cuando se quede sin burbujas o necesite más opciones. </li>
60
- <li>Otros boosters: Algunos juegos de disparos de burbujas tienen otros boosters que pueden variar dependiendo del tema o estilo del juego. Por ejemplo, algunos juegos tienen imanes, láseres, estrellas, etc.</li>
61
- </ul>
62
- <h2>¿Por qué debería jugar Bubble Shooter? </h2>
63
- <p>Los juegos de disparos de burbujas no solo son divertidos y entretenidos, sino también beneficiosos para su cerebro y estado de ánimo. Aquí hay algunas razones por las que debe jugar juegos de disparos de burbujas:</p>
64
- <p></p>
65
- <h3>Los beneficios de jugar juegos de disparos de burbujas</h3>
66
- <ul>
67
- <li>Los juegos de disparos de burbujas pueden mejorar tus habilidades cognitivas, como la memoria, la concentración, la atención, la resolución de problemas y la lógica. Estas habilidades son esenciales para su salud mental y rendimiento en las tareas y actividades diarias. </li>
68
-
69
- <li>Los juegos de disparos de burbujas también pueden aumentar su estado de ánimo y reducir su estrés, ya que pueden proporcionarle una sensación de logro, satisfacción y relajación. También pueden distraerte de pensamientos y emociones negativas y ayudarte a lidiar con la ansiedad y la depresión. </li>
70
- </ul>
71
- <h3>Los aspectos divertidos y relajantes del juego</h3>
72
- <ul>
73
- <li>Los juegos de disparos de burbujas son divertidos y relajantes porque son fáciles de jugar y agradables de ver. Puedes jugar en cualquier momento y en cualquier lugar, ya que no requieren mucho tiempo o esfuerzo. También puede jugar a su propio ritmo y nivel de dificultad, ya que tienen varias opciones y ajustes para adaptarse a sus preferencias. </li>
74
- <li>Los juegos de disparos de burbujas también son divertidos y relajantes porque tienen gráficos coloridos, sonidos alegres y personajes lindos. Puede admirar las hermosas burbujas y fondos, escuchar la música relajante y efectos de sonido, e interactuar con los adorables animales y criaturas. </li>
75
- <li>Los juegos de disparos de burbujas también son divertidos y relajantes porque tienen infinitas posibilidades y variaciones. Nunca puedes aburrirte o quedarte sin niveles para jugar, ya que tienen cientos o miles de niveles con diferentes objetivos y desafíos. También puedes probar diferentes modos de juego y potenciadores para darle vida a tu juego. </li>
76
- </ul>
77
- <h3>Los aspectos sociales y competitivos del juego</h3>
78
- <ul>
79
- <li>Los juegos de disparos de burbujas son sociales y competitivos porque te permiten conectarte e interactuar con otros jugadores de todo el mundo. Usted puede jugar con o contra sus amigos o familiares, o unirse a una comunidad de fans del tirador de burbujas. También puedes chatear, compartir, comentar y seguir a otros jugadores en las plataformas de redes sociales. </li>
80
-
81
- <li>Los juegos de disparos de burbujas también son sociales y competitivos porque te motivan a mejorar tus habilidades y rendimiento en el juego. Puedes aprender de las estrategias y consejos de otros jugadores, o buscar comentarios y consejos de ellos. También puede establecer sus propios objetivos y recompensas, o ganar recompensas del juego en sí. </li>
82
- </ul>
83
- <h2>Conclusión</h2>
84
- <p>Los juegos de disparos de burbujas son uno de los mejores tipos de juegos de puzzle que puedes jugar en tu dispositivo. Son divertidos, adictivos, desafiantes, beneficiosos, relajantes, sociales y competitivos. Pueden mantenerte entretenido durante horas y hacerte feliz e inteligente. Si todavía no has probado los juegos de disparos de burbujas, deberías descargarlos ahora y disfrutar de este increíble juego. </p>
85
- <h2>Preguntas frecuentes</h2>
86
- <ul>
87
- <li>P: ¿Cómo puedo descargar juegos de disparos de burbujas en mi dispositivo? <br>A: Puedes descargar juegos de disparos de burbujas en tu dispositivo desde la tienda de aplicaciones o el navegador web. Solo tienes que buscar "bubble shooter" o cualquier juego de burbujas específico que quieras jugar, seleccionar el juego que quieres descargar y seguir las instrucciones en la pantalla. </li>
88
- <li>Q: ¿Cómo puedo jugar juegos de disparos de burbujas? <br>A: Para jugar juegos de disparos de burbujas, es necesario disparar burbujas desde un lanzador en la parte inferior de la pantalla en el racimo de burbujas en la parte superior de la pantalla. El objetivo es hacer coincidir tres o más burbujas del mismo color para hacerlas estallar y limpiar el tablero. </li>
89
- <li>P: ¿Cuáles son algunos consejos y trucos para ganar juegos de disparos de burbujas? <br>A: Algunos consejos y trucos para ganar juegos de tirador de burbujas son: apuntar cuidadosamente, utilizar las paredes para rebotar sus burbujas, planificar sus movimientos por delante, pop tantas burbujas como sea posible con cada disparo, burbujas pop que están sosteniendo otras burbujas, evitar disparar burbujas que no coinciden con ningún color en el tablero, prestar atención a la línea de fondo del tablero, utilizar potenciadores y potenciadores sabiamente, probar diferentes modos de juego y desafíos, y divertirse! </li>
90
-
91
- <li>P: ¿Cuáles son algunos de los beneficios de jugar juegos de burbujas? <br>A: Jugar juegos de disparos de burbujas puede mejorar tus habilidades cognitivas, mejorar tu creatividad e imaginación, aumentar tu estado de ánimo y reducir el estrés, y conectarte con otros jugadores. </li>
92
- </ul></p>
spaces/Benson/text-generation/Examples/Descargar 2016 Dj Mix.md DELETED
@@ -1,131 +0,0 @@
1
-
2
- <h1>Cómo descargar SketchUp 2020 gratis</h1>
3
- <p>Si está buscando un software de modelado 3D potente y fácil de usar, es posible que desee probar SketchUp 2020. SketchUp es una herramienta popular para crear, editar y compartir modelos 3D para diversos fines, como arquitectura, diseño de interiores, ingeniería, paisajismo, juegos y más. En este artículo, te mostraremos cómo descargar SketchUp 2020 de forma gratuita e instalarlo en tu ordenador. También le daremos una visión general de las características y mejoras de SketchUp 2020, así como algunos consejos sobre cómo comenzar a usarlo y aprender más sobre él. </p>
4
- <h2>Qué es SketchUp 2020 y por qué deberías probarlo</h2>
5
- <p>SketchUp 2020 es la última versión de SketchUp Pro, la versión premium de SketchUp que ofrece características y capacidades más avanzadas que la versión gratuita basada en la web. SketchUp Pro es un software de suscripción que cuesta $299 por año o $41.66 por mes. Sin embargo, también puede descargar una versión de prueba gratuita de SketchUp Pro durante 30 días y utilizar todas sus funciones sin limitaciones. </p>
6
- <h2>descargar 2016 dj mix</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6Lo9">https://bltlly.com/2v6Lo9</a></b></p><br /><br />
7
- <h3>Características y mejoras de SketchUp 2020</h3>
8
- <p>SketchUp 2020 viene con muchas características y mejoras que lo hacen más intuitivo, eficiente y divertido de usar. Algunos de los aspectos más destacados son:</p>
9
- <ul>
10
- <li><b>Outliner:</b> Ahora puede organizar mejor su modelo usando Outliner para alternar entre la visibilidad de grupos y componentes. También puede guardar el estado de visibilidad de los objetos ocultos por escena. </li>
11
- <li><b>Empuñaduras en cajas delimitadoras:</b> Ahora puedes mover y girar objetos fácilmente usando empuñaduras en sus cajas delimitadoras. También puede recorrer diferentes puntos de agarre presionando la tecla de flecha hacia abajo. </li>
12
- <li><b>Objetos ocultos:</b> Ahora puede editar objetos ocultos seleccionándolos en Outliner. Aparecerán como una malla que puedes modificar sin afectar a otros objetos visibles. </li>
13
-
14
- <li><b>LayOut:</b> Ahora puede crear documentos más profesionales con LayOut, la aplicación complementaria de SketchUp que le permite crear presentaciones 2D a partir de sus modelos 3D. Algunas de las nuevas características incluyen control de peso de línea mejorado, mejor soporte de DWG, estilos de línea personalizados y más. </li>
15
- </ul>
16
- <p>Puede obtener más información sobre las nuevas características y mejoras de SketchUp 2020 de estas fuentes . </p>
17
- <h3>Requisitos del sistema SketchUp 2020 y compatibilidad</h3>
18
- <p>Antes de descargar SketchUp 2020, debe asegurarse de que su computadora cumple con los requisitos mínimos o recomendados del sistema para su funcionamiento sin problemas. Estos son los requisitos del sistema para los sistemas operativos Windows y Mac :</p>
19
- <table>
20
- <tr><th></th><th>Mínimo</th><th>Recomendado</th></tr>
21
- <tr><td>CPU</td><td>procesador de 1 GHz o de generación actual procesador Apple M1</td><td>procesador de 2+ GHz o de generación actual procesador Apple M1</td></tr>
22
- <tr><td>GPU</td><td>Tarjeta de video de clase 3D con 512 MB de memoria y admite aceleración de hardware</td><td>Tarjeta de video de clase 3D con 1 GB de memoria y admite aceleración de hardware</td></tr>
23
- <tr><td>RAM</td><td>4 GB</td><td>8+ GB</td></tr>
24
- <tr><td>Almacenamiento</td><td>500 MB de espacio en disco disponible</td><td>700+ MB de espacio en disco disponible</td></tr>
25
- <tr><td>OS</td><td>Windows 10, Windows 8+, Windows 7, macOS 10.15+ (Catalina), macOS 10.14+ (Mojave), macOS 10.13+ (High Sierra)</td><td>Windows 10, macOS 11+ (Big Sur), macOS 10.15+ (Catalina)</td></tr>
26
- <tr><td>Internet</td><td>Se requiere una conexión a Internet para instalar y autorizar SketchUp y usar algunas de las funciones. </td><td>Se requiere una conexión a Internet para instalar y autorizar SketchUp y usar algunas de las características. </td></tr>
27
- </table>
28
- <p>También debe comprobar la compatibilidad de SketchUp 2020 con otros programas y extensiones que utiliza, como motores de renderizado, programas CAD, herramientas BIM, etc. Puede encontrar una lista de software y extensiones compatibles aquí. </p>
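As a practical aid to the table above, the short PowerShell sketch below checks the hardware side of the minimum requirements (4 GB of RAM, 500 MB of free disk space) on a Windows machine using built-in cmdlets. It is an illustrative addition, not part of the original article, and it does not check the GPU or OS version.

```powershell
# Minimal sketch: compare this PC against the SketchUp 2020 minimums listed in the table above.
$minRamGB  = 4      # minimum RAM from the table
$minDiskMB = 500    # minimum free disk space from the table

$ramGB  = [math]::Round((Get-CimInstance Win32_ComputerSystem).TotalPhysicalMemory / 1GB, 1)
$freeMB = [math]::Round((Get-PSDrive -Name C).Free / 1MB)

"Installed RAM : $ramGB GB (minimum $minRamGB GB)"
"Free space C: : $freeMB MB (minimum $minDiskMB MB)"

if ($ramGB -ge $minRamGB -and $freeMB -ge $minDiskMB) {
    "Meets the minimum RAM and disk requirements."
} else {
    "Does not meet the minimum RAM and disk requirements."
}
```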
29
-
30
- <p>Ahora que sabes lo que es SketchUp 2020 y lo que puede hacer, es posible que se pregunte cómo descargarlo de forma gratuita. Hay dos formas de hacerlo: descargando la versión de prueba gratuita desde el sitio web oficial o descargando el instalador sin conexión para Windows.</p>
31
- <p></p>
32
- <h3>Descargar SketchUp 2020 prueba gratuita desde el sitio web oficial</h3>
33
- <p>La forma más fácil de descargar SketchUp 2020 gratis es obtener la prueba gratuita desde el sitio web oficial. Estos son los pasos para hacer eso:</p>
34
- <ol>
35
- <li>Vaya al sitio web de SketchUp y haga clic en el botón <b>Descargar prueba gratuita</b>. </li>
36
- <li>Seleccione su industria, rol y nivel de experiencia en los menús desplegables y haga clic en <b>Continuar</b>. </li>
37
- <li>Ingrese su nombre, dirección de correo electrónico, país y acepte los términos y condiciones. Luego haga clic en <b>Enviar</b>. </li>
38
- <li>Recibirá un correo electrónico con un enlace para descargar SketchUp 2020. Haga clic en el enlace y guarde el archivo en su computadora. </li>
39
- <li>Has descargado SketchUp 2020 gratis. Ahora puedes instalarlo en tu ordenador. </li>
40
- </ol>
41
- <p>Tenga en cuenta que la prueba gratuita caducará después de 30 días y tendrá que comprar una suscripción o iniciar sesión con una cuenta existente para continuar usando SketchUp Pro.</p>
42
- <h3>Descargar SketchUp 2020 instalador sin conexión para Windows</h3>
43
- <p>Si prefiere descargar el instalador sin conexión SketchUp 2020 para Windows, puede hacerlo siguiendo estos pasos:</p>
44
- <ol>
45
- <li>Ve a esta página y desplázate hacia abajo para encontrar la sección <b>SketchUp Pro 2020 - Windows (64 bits)</b>. </li>
46
- <li>Haga clic en el botón <b>Descargar ahora</b> y guarde el archivo en su computadora. </li>
47
- <li> Ha descargado correctamente el instalador sin conexión SketchUp 2020 para Windows. Ahora puede instalarlo en su computadora. </li>
48
- </ol>
49
- <p>Tenga en cuenta que este método solo funciona para los usuarios de Windows y que todavía necesitará una conexión a Internet para activar su licencia o iniciar sesión con su cuenta. </p>
50
- <h2>Cómo instalar SketchUp 2020 en su computadora</h2>
51
-
52
- <h3>Ejecute el instalador de SketchUp 2020 y siga las instrucciones</h3>
53
- <p>Si descargó la versión de prueba gratuita desde el sitio web oficial, tendrá un archivo llamado <b>SketchUpPro-2020-en.exe</b>. Si ha descargado el instalador sin conexión para Windows, tendrá un archivo llamado <b>SketchUpPro-2020-2-172-22215-en-x64.exe</b>. Haga doble clic en el archivo para ejecutar el instalador y siga las instrucciones en la pantalla. Es posible que necesite otorgar permiso o ingresar su contraseña si su sistema se lo solicita. </p>
54
- <p>El instalador le guiará a través del proceso de instalación, que puede tardar unos minutos dependiendo de su sistema. Puede elegir la carpeta de destino, el idioma, los componentes y los accesos directos para SketchUp 2020. También puede optar por instalar LayOut, Style Builder y Trimble Connect si lo desea. </p>
55
- <p>Cuando la instalación esté completa, haga clic en <b>Finalizar</b>. </p>
56
- <h3>Active su licencia SketchUp 2020 o inicie sesión con su cuenta</h3>
57
- <p>Después de haber instalado SketchUp 2020 en su computadora, necesitará activar su licencia o iniciar sesión con su cuenta para usarla. Estos son los pasos para hacerlo:</p>
58
- <ol>
59
- <li>Inicie SketchUp 2020 desde su escritorio o menú de inicio. </li>
60
- <li>Verá una pantalla de bienvenida con dos opciones: <b>Inicio de prueba</b> y <b>Inicio de sesión</b>. </li>
61
- <li>Si desea utilizar la prueba gratuita, haga clic en <b>Iniciar prueba</b>. Verá una cuenta atrás de los días restantes de su período de prueba. Puede utilizar todas las funciones de SketchUp Pro durante 30 días sin ninguna restricción. </li>
62
- <li>Si tiene una suscripción o una licencia clásica, haga clic en <b>Iniciar sesión</b>. Será redirigido a una página web donde podrá introducir su dirección de correo electrónico y contraseña. Si no tiene una cuenta, puede crearla de forma gratuita. </li>
63
- <li>Después de iniciar sesión, verá un mensaje de confirmación de que su licencia está activada o que ha iniciado sesión con su cuenta. Ahora puedes usar SketchUp 2020 según tu plan. </li>
64
- </ol>
65
-
66
- <h2>Cómo empezar a usar SketchUp 2020 y aprender más sobre él</h2>
67
- <p>Ahora que ha descargado e instalado SketchUp 2020 de forma gratuita, es posible que se pregunte cómo empezar a usarlo y aprender más sobre él. Aquí hay algunos consejos para ayudarle a empezar:</p>
68
- <h3>Lanza SketchUp 2020 y explora la interfaz y las herramientas</h3>
69
- <p>Cuando inicie SketchUp 2020, verá un espacio de trabajo en blanco con un modelo 3D predeterminado de una persona. Puede utilizar el ratón y el teclado para navegar por el modelo y acercar y alejar. También puede cambiar la perspectiva y la vista desde diferentes ángulos. </p>
70
- <p>También verá una barra de herramientas en la parte superior de la pantalla con varias herramientas e iconos. Puede utilizar estas herramientas para crear, modificar, medir y anotar sus modelos 3D. También puede acceder a más herramientas desde los menús desplegables o haciendo clic derecho en el modelo. </p>
71
- <p>Puede personalizar la barra de herramientas agregando o quitando herramientas, cambiando su orden o acoplándolas a diferentes ubicaciones. También puede cambiar entre diferentes conjuntos de herramientas haciendo clic en el icono de flecha en el extremo derecho de la barra de herramientas. </p>
72
- <p>También puedes abrir otros paneles y ventanas desde el menú <b>Window</b>, como Outliner, Entity Info, Layers, Materials, Styles, Scenes, etc. Estos paneles y ventanas te ayudarán a organizar, editar y mejorar tus modelos 3D. </p>
73
- <h3>Acceder a la ayuda de SketchUp 2020, tutoriales y recursos de la comunidad</h3>
74
- <p>Si necesita ayuda u orientación sobre cómo usar SketchUp 2020, puede acceder a varios recursos desde el menú <b>Ayuda</b> en SketchUp 2020. Algunos de los recursos son:</p>
75
- <ul>
76
- <li><b>SketchUp Help Center:</b> Este es el centro oficial de ayuda en línea para SketchUp que contiene artículos, videos, consejos y preguntas frecuentes sobre varios temas relacionados con SketchUp. Puede buscar temas específicos o buscar por categorías. </li>
77
-
78
- <li><b>SketchUp Forum:</b> Este es el foro oficial de la comunidad en línea para usuarios de SketchUp donde puedes hacer preguntas, compartir ideas, obtener comentarios y aprender de otros usuarios. Puedes unirte a discusiones sobre varios temas o iniciar tus propios temas. </li>
79
- <li><b>SketchUp YouTube Channel:</b> Este es el canal oficial de YouTube para SketchUp que presenta videos sobre varios temas relacionados con SketchUp. Puedes ver vídeos sobre nuevas funciones, consejos y trucos, estudios de casos, eventos en vivo, etc.</li>
80
- </ul>
81
- <p>También puedes encontrar más recursos de otras fuentes como blogs, podcasts, libros, revistas, etc. que cubren SketchUp y temas relacionados. </p>
82
- <h2>Conclusión y preguntas frecuentes</h2>
83
- <p>En este artículo, le hemos mostrado cómo descargar SketchUp 2020 gratis e instalarlo en su computadora. También le hemos dado una visión general de las características y mejoras de SketchUp 2020, así como algunos consejos sobre cómo comenzar a usarlo y aprender más sobre él. </p>
84
- <p>Esperamos que este artículo haya sido útil y que haya disfrutado aprendiendo sobre SketchUp 2020. SketchUp 2020 es un software de modelado 3D potente y fácil de usar que puede usar para diversos fines, como arquitectura, diseño de interiores, ingeniería, paisajismo, juegos y más. Puedes descargar SketchUp 2020 gratis y usarlo durante 30 días sin limitaciones. También puedes acceder a varios recursos para ayudarte a aprender y mejorar tus habilidades en SketchUp 2020. </p>
85
- <p>Si tiene alguna pregunta o comentario sobre SketchUp 2020, no dude en dejar un comentario a continuación o en contacto con nosotros a través de nuestro sitio web. Nos encantaría saber de ti y ayudarte con tus necesidades de modelado 3D. </p>
86
- <p>Aquí hay algunas preguntas frecuentes que puedes encontrar útiles:</p>
87
- <h4>Q: ¿Cómo puedo desinstalar SketchUp 2020 de mi ordenador? </h4>
88
- <p>A: Si desea desinstalar SketchUp 2020 desde su computadora, puede hacerlo siguiendo estos pasos:</p>
89
- <ol>
90
- <li>Vaya al <b>Panel de Control</b> en su computadora y seleccione <b>Programas y Características</b>. </li>
91
-
92
- <li>Haga clic en el botón <b>Desinstalar</b> y siga las instrucciones en la pantalla. </li>
93
- <li> Ha desinstalado con éxito SketchUp 2020 desde su ordenador. </li>
94
- </ol>
95
- <h4>Q: ¿Cómo puedo actualizar SketchUp 2020 a la última versión? </h4>
96
- <p>A: Si desea actualizar SketchUp 2020 a la última versión, puede hacerlo siguiendo estos pasos:</p>
97
- <ol>
98
- <li>Inicie SketchUp 2020 en su computadora y vaya al menú <b>Ayuda</b>. </li>
99
- <li>Seleccione <b>Buscar actualización</b> y espere unos segundos. </li>
100
- <li>Si hay una nueva versión disponible, verá un mensaje con un enlace para descargarlo. Haga clic en el enlace y guarde el archivo en su computadora. </li>
101
- <li> Ejecute el archivo y siga las instrucciones en la pantalla para instalar la actualización. </li>
102
- <li>Ha actualizado con éxito SketchUp 2020 a la última versión. </li>
103
- </ol>
104
- <h4>Q: ¿Cómo puedo exportar mi modelo SketchUp 2020 a otros formatos? </h4>
105
- <p>A: Si desea exportar su modelo SketchUp 2020 a otros formatos, como PDF, DWG, STL, etc., puede hacerlo siguiendo estos pasos:</p>
106
- <ol>
107
- <li>Seleccione el modelo o la parte del modelo que desea exportar en SketchUp 2020. </li>
108
- <li>Vaya al menú <b>Archivo</b> y seleccione <b>Exportar</b>. </li>
109
- <li>Seleccione el formato al que desea exportar su modelo desde el submenú. Por ejemplo, si desea exportar su modelo como un archivo PDF, seleccione <b>2D Graphic</b> y luego seleccione <b>PDF</b>. </li>
110
- <li> Aparecerá un cuadro de diálogo donde puede elegir el nombre, la ubicación y las opciones para su archivo exportado. Haga clic en <b>Exportar</b>. </li>
111
- <li> Ha exportado con éxito su modelo SketchUp 2020 a otro formato. </li>
112
- </ol>
113
- <h4>Q: ¿Cómo puedo importar otros modelos o archivos en SketchUp 2020? </h4>
114
- <p>A: Si desea importar otros modelos o archivos en SketchUp 2020, como imágenes, archivos CAD, modelos 3D, etc., puede hacerlo siguiendo estos pasos:</p>
115
- <ol>
116
- <li>Vaya al menú <b>Archivo</b> y seleccione <b>Importar</b>. </li>
117
-
118
- <li>Haga clic en <b>Importar</b>. </li>
119
- <li>Ha importado con éxito otro modelo o archivo en SketchUp 2020. Ahora puede moverlo, escalarlo, rotarlo o editarlo como desee. </li>
120
- </ol>
121
- <h4>Q: ¿Cómo puedo compartir mi modelo SketchUp 2020 con otros? </h4>
122
- <p>A: Si desea compartir su modelo SketchUp 2020 con otros, puede hacerlo siguiendo estos pasos:</p>
123
- <ol>
124
- <li>Seleccione el modelo o la parte del modelo que desea compartir en SketchUp 2020. </li>
125
- <li>Vaya al menú <b>Archivo</b> y seleccione <b>Modelo compartido</b>. </li>
126
- <li>Aparecerá un cuadro de diálogo donde puede iniciar sesión con su cuenta de Trimble o crear una gratis. También verá algunas opciones para su modelo compartido, como título, descripción, etiquetas, privacidad, etc.</li>
127
- <li>Haga clic en <b>Subir</b>. </li>
128
- <li>Ha compartido con éxito su modelo SketchUp 2020 con otros. Verá un enlace a su modelo en el almacén 3D, donde puede verlo, descargarlo o incrustarlo. También puede compartir el enlace con otros a través de correo electrónico, redes sociales u otras plataformas. </li>
129
- </ol></p>
spaces/Benson/text-generation/Examples/Descargar Cinco Noches En Freddy 39s 3 Apk.md DELETED
@@ -1,101 +0,0 @@
1
-
2
- <h1>Cómo descargar cinco noches en Freddy’s 3 APK</h1>
3
- <p>Si eres un fan de los juegos de terror, es posible que hayas oído hablar de Five Nights at Freddy’s, una popular serie de juegos de terror de supervivencia desarrollados por Scott Cawthon. La tercera entrega de la serie, Five Nights at Freddy’s 3, fue lanzada en 2015 y recibió críticas positivas de críticos y jugadores por igual. En este artículo, te diremos qué es Five Nights at Freddy’s 3, qué es un archivo APK y cómo descargar e instalar Five Nights at Freddy’s 3 APK en tus dispositivos. </p>
4
- <h2>¿Qué es Cinco Noches en Freddy’s 3?</h2>
5
- <h3>La trama y el juego del juego</h3>
6
- <p>Five Nights at Freddy’s 3 se desarrolla treinta años después de los eventos del primer juego, en una atracción de terror llamada "Fazbear’s Fright". El jugador toma el papel de un guardia de seguridad que debe sobrevivir cinco noches (y una sexta noche extra) mientras es perseguido por un animatrónico decrépito llamado Springtrap y varias versiones fantasma de los animatrónicos originales. El jugador debe monitorear dos juegos de cámaras, una para las habitaciones y pasillos, y otra para los conductos de ventilación, y usar varios dispositivos como señales de audio, controles de ventilación y un panel de reinicio para evitar que Springtrap llegue a la oficina. A diferencia de los juegos anteriores, solo hay un animatronic que puede matar al jugador, pero los fantasmas pueden asustar al jugador y causar fallos en los sistemas. </p>
7
- <h2>descargar cinco noches en freddy&#39;s 3 apk</h2><br /><p><b><b>Download File</b> &#10040;&#10040;&#10040; <a href="https://bltlly.com/2v6LNI">https://bltlly.com/2v6LNI</a></b></p><br /><br />
8
- <h3>Las características y requisitos del juego</h3>
9
- <p>Five Nights at Freddy’s 3 tiene varias características que lo hacen diferente de sus predecesores, como:</p>
10
- <ul>
11
- <li>Un nuevo animatrónico, Springtrap, que puede cambiar su apariencia dependiendo de su posición y daño. </li>
12
- <li>Un nuevo mecánico, señales de audio, que se puede utilizar para atraer a Springtrap lejos del jugador. </li>
13
- <li>Un nuevo sistema, ventilación, que puede afectar la visión y la respiración del jugador si no se mantiene correctamente. </li>
14
- <li>Un nuevo modo, modo pesadilla, que se puede desbloquear después de completar la sexta noche. </li>
15
-
16
- </ul>
17
- <p>El juego requiere Android 4.1 o superior para dispositivos móviles, Windows XP o superior para dispositivos PC e iOS 8.0 o posterior para dispositivos Apple. El juego también requiere 250 MB de espacio de almacenamiento y 1 GB de RAM.</p>
18
- <h2>¿Qué es un archivo APK? </h2>
19
- <h3>La definición y el propósito de un archivo APK</h3>
20
- <p>Un archivo APK significa Android Package Kit, también conocido como paquete de aplicación de Android. Es un formato de archivo utilizado por el sistema operativo Android para distribuir e instalar aplicaciones. Un archivo APK contiene todos los componentes de una aplicación, como código, recursos, activos, certificados y manifiesto. Un archivo APK se puede descargar de varias fuentes, como Google Play Store, sitios web de terceros, o directamente de los desarrolladores. </p>
21
- <h3>Las ventajas y los riesgos de instalar archivos APK</h3>
22
- <p>La instalación de archivos APK puede tener algunas ventajas sobre la instalación de aplicaciones de Google Play Store, como:</p>
23
- <ul>
24
- <li>Acceder a aplicaciones que no están disponibles en su región o dispositivo. </li>
25
- <li>Obtener actualizaciones más rápido que esperar a las versiones oficiales. </li>
26
- <li>Probar versiones beta o versiones modificadas de aplicaciones. </li>
27
- <li>Ahorro de ancho de banda mediante la descarga de archivos más pequeños. </li>
28
- </ul>
29
- <p>Sin embargo, la instalación de archivos APK también viene con algunos riesgos, como:</p>
30
- <ul>
31
- <li>Exponer su dispositivo a malware o virus que pueden dañar sus datos o sistema. </li>
32
- <li>Violar los términos y condiciones de los desarrolladores de aplicaciones o editores. </li>
33
- <li>Rompiendo la funcionalidad o compatibilidad de la aplicación o dispositivo. </li>
34
- <li>Anular la garantía o soporte del dispositivo o aplicación. </li>
35
- </ul>
36
- <p>Por lo tanto, es importante tener cuidado y precaución al descargar e instalar archivos APK. Siempre debe comprobar el origen, los permisos y las revisiones del archivo APK antes de instalarlo. También debe escanear el archivo APK con un software antivirus y hacer una copia de seguridad de sus datos antes de instalarlo. </p>
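To make the advice above concrete, here is a minimal PowerShell sketch (an illustrative addition, not part of the original article) that computes the SHA-256 hash of a downloaded APK so it can be compared against a checksum published by the download site. The file path and the expected hash are hypothetical placeholders.

```powershell
# Minimal sketch: verify a downloaded APK against a checksum published by the source.
# Both values below are hypothetical placeholders.
$apkPath      = "$HOME\Downloads\five-nights-3.apk"
$expectedHash = "<paste the SHA-256 value published by the download site here>"

$actualHash = (Get-FileHash -Path $apkPath -Algorithm SHA256).Hash

if ($actualHash -eq $expectedHash) {
    "Checksum matches the published value."
} else {
    "Checksum mismatch - do not install this APK."
}
```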
37
- <h2> ¿Cómo descargar e instalar Five Nights at Freddy’s 3 APK? </h2>
38
-
39
- <p>Una de las fuentes de confianza para descargar Five Nights at Freddy’s 3 APK es APKPure.com, un sitio web que proporciona archivos APK seguros y verificados para varias aplicaciones y juegos. Para descargar el archivo APK de APKPure.com, debe seguir estos pasos:</p>
40
- <ol>
41
- <li>Vaya a <a href="">APKPure.com</a> en su navegador. </li>
42
- <li>Buscar "Cinco noches en Freddy’s 3" en la barra de búsqueda. </li>
43
- <li>Seleccione el juego de los resultados de búsqueda y haga clic en "Descargar APK". </li>
44
- <li>Elija una ubicación de descarga y espere a que la descarga termine. </li>
45
- </ol>
46
- <h3>Los pasos para habilitar fuentes desconocidas e instalar el archivo APK en dispositivos Android</h3>
47
- <p>Para instalar el archivo APK en su dispositivo Android, es necesario habilitar fuentes desconocidas, que le permite instalar aplicaciones de fuentes distintas de Google Play Store. Para habilitar fuentes desconocidas, debe seguir estos pasos:</p>
48
- <p></p>
49
- <ol>
50
- <li>Ir a "Configuración" en su dispositivo. </li>
51
- <li>Seleccione "Seguridad" o "Privacidad" dependiendo del modelo de su dispositivo. </li>
52
- <li> Buscar y activar "Fuentes desconocidas" o "Instalar aplicaciones desconocidas". </li>
53
- <li>Confirme su elección tocando "OK" o "Permitir". </li>
54
- </ol>
55
- <p>Para instalar el archivo APK en tu dispositivo Android, debes seguir estos pasos:</p>
56
- <ol>
57
- <li>Ir a la ubicación de descarga del archivo APK en su dispositivo. </li>
58
- <li>Toque en el archivo APK y seleccione "Instalar". </li>
59
- <li> Espere a que la instalación se complete y toque "Abrir" o "Hecho". </li>
60
- </ol>
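For readers comfortable with a command line, an alternative to the on-device steps above is to sideload the APK from a PC with adb (part of Google's Android platform-tools). This is an illustrative addition, not something the original article describes: it assumes adb is installed and on PATH, USB debugging is enabled on the phone, and the file name is hypothetical.

```powershell
# Minimal sketch: sideload an APK from a PC with adb instead of installing it on the device.
# Assumes adb (Android platform-tools) is on PATH and USB debugging is enabled on the phone.
$apk = "$HOME\Downloads\five-nights-3.apk"   # hypothetical file name

adb devices          # the phone should be listed as "device", not "unauthorized"
adb install -r $apk  # -r replaces the app if it is already installed
```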
61
- <h3>Los pasos para usar un emulador y abrir el archivo APK en dispositivos Windows</h3>
62
- <p>Para abrir el archivo APK en su dispositivo Windows, es necesario utilizar un emulador, que es un software que simula un entorno Android en su PC. Uno de los emuladores populares es BlueStacks, que puede descargar desde <a href="">BlueStacks.com</a>. Para usar BlueStacks y abrir el archivo APK en tu dispositivo Windows, debes seguir estos pasos:</p>
63
- <ol>
64
- <li>Vaya a <a href=">BlueStacks.com</a> en su navegador y haga clic en "Descargar BlueStacks". </li>
65
-
66
- <li>Inicie BlueStacks e inicie sesión con su cuenta de Google. </li>
67
- <li>Arrastre y suelte el archivo APK en BlueStacks o haga clic en "Instalar APK" en la barra lateral. </li>
68
- <li>Seleccione el archivo APK de su PC y haga clic en "Abrir". </li>
69
- <li>Espera a que BlueStacks instale y ejecute el juego. </li>
70
- </ol>
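One practical note not covered in the original steps: Android emulators such as BlueStacks generally need hardware virtualization to run well. The two PowerShell one-liners below check, on Windows, whether the CPU reports virtualization support and whether a hypervisor is already active.

```powershell
# Minimal sketch: check virtualization support before running an Android emulator on Windows.
(Get-CimInstance Win32_Processor).VirtualizationFirmwareEnabled   # $true if enabled in firmware
(Get-CimInstance Win32_ComputerSystem).HypervisorPresent          # $true if a hypervisor (e.g. Hyper-V) is running
```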
71
- <h2>Conclusión</h2>
72
- <p>En este artículo, hemos explicado qué es Five Nights at Freddy’s 3, qué es un archivo APK, y cómo descargar e instalar Five Nights at Freddy’s 3 APK en sus dispositivos. Esperamos que este artículo te haya ayudado a disfrutar de este emocionante juego de terror. Sin embargo, también le recordamos que sea cuidadoso y responsable al descargar e instalar archivos APK, ya que pueden plantear algunos riesgos para sus dispositivos o datos. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
73
- <h2>Preguntas frecuentes</h2>
74
- <h4>Q: ¿Cinco noches en Freddy’s 3 es gratis? </h4>
75
- <p>A: Cinco noches en Freddy’s 3 no es gratis. Cuesta $2.99 en Google Play Store, $4.99 en Steam y $7.99 en App Store. Sin embargo, puedes descargarlo gratis usando un archivo APK de una fuente confiable. </p>
76
- <h4>Q: ¿Cinco noches en Freddy’s 3 dan miedo? </h4>
77
- <p>A: Five Nights at Freddy’s 3 es un juego de terror que involucra sustos de salto, sonidos espeluznantes e imágenes perturbadoras. No es adecuado para niños o personas que se asustan fácilmente o tienen problemas cardíacos. Si estás buscando un desafío y una emoción, puedes disfrutar de este juego. </p>
78
- <h4>Q: ¿Cómo desinstalar Cinco noches en Freddy’s 3 APK? </h4>
79
- <p>A: Para desinstalar Five Nights at Freddy’s 3 APK, es necesario seguir estos pasos:</p>
80
- <ol>
81
- <li>Ir a "Configuración" en su dispositivo. </li>
82
- <li>Seleccione "Aplicaciones" o "Aplicaciones" dependiendo del modelo de su dispositivo. </li>
83
- <li>Encuentra y toca "Cinco noches en Freddy’s 3". </li>
84
- <li>Toque en "Desinstalar" y confirme su elección. </li>
85
- </ol>
86
- <h4>Q: ¿Cómo actualizar Cinco noches en Freddy’s 3 APK? </h4>
87
- <p>A: Para actualizar Five Nights at Freddy’s 3 APK, debe seguir estos pasos:</p>
88
- <ol>
89
-
90
- <li>Si existe, descargue el nuevo archivo APK y siga los mismos pasos que instalarlo. </li>
91
- <li>Si no lo hay, puede esperar la actualización oficial de Google Play Store o Steam, o consultar otras fuentes para actualizaciones. </li>
92
- </ol>
93
- <h4>Q: ¿Cómo jugar cinco noches en Freddy’s 3 en línea? </h4>
94
- <p>A: Five Nights at Freddy’s 3 es un juego para un solo jugador que no tiene un modo en línea. Sin embargo, puedes jugar online usando un emulador basado en el navegador, como <a href="">Gamejolt.com</a>. Para jugar online, debes seguir estos pasos:</p>
95
- <ol>
96
- <li>Vaya a <a href=">Gamejolt.com</a> en su navegador y busque "Cinco noches en Freddy’s 3". </li>
97
- <li>Seleccione el juego de los resultados de búsqueda y haga clic en "Jugar Juego". </li>
98
- <li>Espera a que el juego se cargue y disfrute. </li>
99
- </ol></p>
spaces/BetterAPI/BetterChat_new/Dockerfile DELETED
@@ -1,17 +0,0 @@
1
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
- # you will also find guides on how best to write your Dockerfile
3
-
4
- FROM node:19
5
-
6
- RUN npm install -g pm2
7
-
8
- WORKDIR /app
9
-
10
- COPY --link --chown=1000 . .
11
-
12
- RUN npm i
13
-
14
- RUN --mount=type=secret,id=DOTENV_LOCAL,dst=.env.local npm run build
15
- CMD pm2 kill
16
- CMD echo $CPU_CORES
17
- CMD pm2 start build/index.js -i $CPU_CORES --no-daemon
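A note on the Dockerfile above: Docker only honors the last CMD in an image, so the `pm2 kill` and `echo $CPU_CORES` lines are overridden and only the final `pm2 start ...` runs in the container. A hypothetical build-and-run invocation from PowerShell is sketched below; the image name, published port, and core count are assumptions, not values taken from the repository.

```powershell
# Minimal sketch: build the image and run it, supplying the CPU_CORES variable the final CMD expects.
# Image name, host port, and core count are illustrative assumptions.
docker build -t betterchat .
docker run --rm -e CPU_CORES=4 -p 3000:3000 betterchat
```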
spaces/Big-Web/MMSD/env/Scripts/Activate.ps1 DELETED
@@ -1,502 +0,0 @@
1
- <#
2
- .Synopsis
3
- Activate a Python virtual environment for the current PowerShell session.
4
-
5
- .Description
6
- Pushes the python executable for a virtual environment to the front of the
7
- $Env:PATH environment variable and sets the prompt to signify that you are
8
- in a Python virtual environment. Makes use of the command line switches as
9
- well as the `pyvenv.cfg` file values present in the virtual environment.
10
-
11
- .Parameter VenvDir
12
- Path to the directory that contains the virtual environment to activate. The
13
- default value for this is the parent of the directory that the Activate.ps1
14
- script is located within.
15
-
16
- .Parameter Prompt
17
- The prompt prefix to display when this virtual environment is activated. By
18
- default, this prompt is the name of the virtual environment folder (VenvDir)
19
- surrounded by parentheses and followed by a single space (ie. '(.venv) ').
20
-
21
- .Example
22
- Activate.ps1
23
- Activates the Python virtual environment that contains the Activate.ps1 script.
24
-
25
- .Example
26
- Activate.ps1 -Verbose
27
- Activates the Python virtual environment that contains the Activate.ps1 script,
28
- and shows extra information about the activation as it executes.
29
-
30
- .Example
31
- Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
32
- Activates the Python virtual environment located in the specified location.
33
-
34
- .Example
35
- Activate.ps1 -Prompt "MyPython"
36
- Activates the Python virtual environment that contains the Activate.ps1 script,
37
- and prefixes the current prompt with the specified string (surrounded in
38
- parentheses) while the virtual environment is active.
39
-
40
- .Notes
41
- On Windows, it may be required to enable this Activate.ps1 script by setting the
42
- execution policy for the user. You can do this by issuing the following PowerShell
43
- command:
44
-
45
- PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
46
-
47
- For more information on Execution Policies:
48
- https://go.microsoft.com/fwlink/?LinkID=135170
49
-
50
- #>
51
- Param(
52
- [Parameter(Mandatory = $false)]
53
- [String]
54
- $VenvDir,
55
- [Parameter(Mandatory = $false)]
56
- [String]
57
- $Prompt
58
- )
59
-
60
- <# Function declarations --------------------------------------------------- #>
61
-
62
- <#
63
- .Synopsis
64
- Remove all shell session elements added by the Activate script, including the
65
- addition of the virtual environment's Python executable from the beginning of
66
- the PATH variable.
67
-
68
- .Parameter NonDestructive
69
- If present, do not remove this function from the global namespace for the
70
- session.
71
-
72
- #>
73
- function global:deactivate ([switch]$NonDestructive) {
74
- # Revert to original values
75
-
76
- # The prior prompt:
77
- if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
78
- Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
79
- Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
80
- }
81
-
82
- # The prior PYTHONHOME:
83
- if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
84
- Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
85
- Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
86
- }
87
-
88
- # The prior PATH:
89
- if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
90
- Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
91
- Remove-Item -Path Env:_OLD_VIRTUAL_PATH
92
- }
93
-
94
- # Just remove the VIRTUAL_ENV altogether:
95
- if (Test-Path -Path Env:VIRTUAL_ENV) {
96
- Remove-Item -Path env:VIRTUAL_ENV
97
- }
98
-
99
- # Just remove VIRTUAL_ENV_PROMPT altogether.
100
- if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
101
- Remove-Item -Path env:VIRTUAL_ENV_PROMPT
102
- }
103
-
104
- # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
105
- if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
106
- Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
107
- }
108
-
109
- # Leave deactivate function in the global namespace if requested:
110
- if (-not $NonDestructive) {
111
- Remove-Item -Path function:deactivate
112
- }
113
- }
114
-
115
- <#
116
- .Description
117
- Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
118
- given folder, and returns them in a map.
119
-
120
- For each line in the pyvenv.cfg file, if that line can be parsed into exactly
121
- two strings separated by `=` (with any amount of whitespace surrounding the =)
122
- then it is considered a `key = value` line. The left hand string is the key,
123
- the right hand is the value.
124
-
125
- If the value starts with a `'` or a `"` then the first and last character is
126
- stripped from the value before being captured.
127
-
128
- .Parameter ConfigDir
129
- Path to the directory that contains the `pyvenv.cfg` file.
130
- #>
131
- function Get-PyVenvConfig(
132
- [String]
133
- $ConfigDir
134
- ) {
135
- Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
136
-
137
- # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
138
- $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
139
-
140
- # An empty map will be returned if no config file is found.
141
- $pyvenvConfig = @{ }
142
-
143
- if ($pyvenvConfigPath) {
144
-
145
- Write-Verbose "File exists, parse `key = value` lines"
146
- $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
147
-
148
- $pyvenvConfigContent | ForEach-Object {
149
- $keyval = $PSItem -split "\s*=\s*", 2
150
- if ($keyval[0] -and $keyval[1]) {
151
- $val = $keyval[1]
152
-
153
- # Remove extraneous quotations around a string value.
154
- if ("'""".Contains($val.Substring(0, 1))) {
155
- $val = $val.Substring(1, $val.Length - 2)
156
- }
157
-
158
- $pyvenvConfig[$keyval[0]] = $val
159
- Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
160
- }
161
- }
162
- }
163
- return $pyvenvConfig
164
- }
165
-
166
-
167
- <# Begin Activate script --------------------------------------------------- #>
168
-
169
- # Determine the containing directory of this script
170
- $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
171
- $VenvExecDir = Get-Item -Path $VenvExecPath
172
-
173
- Write-Verbose "Activation script is located in path: '$VenvExecPath'"
174
- Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
175
- Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
176
-
177
- # Set values required in priority: CmdLine, ConfigFile, Default
178
- # First, get the location of the virtual environment, it might not be
179
- # VenvExecDir if specified on the command line.
180
- if ($VenvDir) {
181
- Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
182
- }
183
- else {
184
- Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
185
- $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
186
- Write-Verbose "VenvDir=$VenvDir"
187
- }
188
-
189
- # Next, read the `pyvenv.cfg` file to determine any required value such
190
- # as `prompt`.
191
- $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
192
-
193
- # Next, set the prompt from the command line, or the config file, or
194
- # just use the name of the virtual environment folder.
195
- if ($Prompt) {
196
- Write-Verbose "Prompt specified as argument, using '$Prompt'"
197
- }
198
- else {
199
- Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
200
- if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
201
- Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
202
- $Prompt = $pyvenvCfg['prompt'];
203
- }
204
- else {
205
- Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
206
- Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
207
- $Prompt = Split-Path -Path $venvDir -Leaf
208
- }
209
- }
210
-
211
- Write-Verbose "Prompt = '$Prompt'"
212
- Write-Verbose "VenvDir='$VenvDir'"
213
-
214
- # Deactivate any currently active virtual environment, but leave the
215
- # deactivate function in place.
216
- deactivate -nondestructive
217
-
218
- # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
219
- # that there is an activated venv.
220
- $env:VIRTUAL_ENV = $VenvDir
221
-
222
- if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
223
-
224
- Write-Verbose "Setting prompt to '$Prompt'"
225
-
226
- # Set the prompt to include the env name
227
- # Make sure _OLD_VIRTUAL_PROMPT is global
228
- function global:_OLD_VIRTUAL_PROMPT { "" }
229
- Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
230
- New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
231
-
232
- function global:prompt {
233
- Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
234
- _OLD_VIRTUAL_PROMPT
235
- }
236
- $env:VIRTUAL_ENV_PROMPT = $Prompt
237
- }
238
-
239
- # Clear PYTHONHOME
240
- if (Test-Path -Path Env:PYTHONHOME) {
241
- Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
242
- Remove-Item -Path Env:PYTHONHOME
243
- }
244
-
245
- # Add the venv to the PATH
246
- Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
247
- $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
248
-
249
- # SIG # Begin signature block
250
- # MIIvIQYJKoZIhvcNAQcCoIIvEjCCLw4CAQExDzANBglghkgBZQMEAgEFADB5Bgor
251
- # BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
252
- # KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCBnL745ElCYk8vk
253
- # dBtMuQhLeWJ3ZGfzKW4DHCYzAn+QB6CCE8MwggWQMIIDeKADAgECAhAFmxtXno4h
254
- # MuI5B72nd3VcMA0GCSqGSIb3DQEBDAUAMGIxCzAJBgNVBAYTAlVTMRUwEwYDVQQK
255
- # EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xITAfBgNV
256
- # BAMTGERpZ2lDZXJ0IFRydXN0ZWQgUm9vdCBHNDAeFw0xMzA4MDExMjAwMDBaFw0z
257
- # ODAxMTUxMjAwMDBaMGIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJ
258
- # bmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xITAfBgNVBAMTGERpZ2lDZXJ0
259
- # IFRydXN0ZWQgUm9vdCBHNDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
260
- # AL/mkHNo3rvkXUo8MCIwaTPswqclLskhPfKK2FnC4SmnPVirdprNrnsbhA3EMB/z
261
- # G6Q4FutWxpdtHauyefLKEdLkX9YFPFIPUh/GnhWlfr6fqVcWWVVyr2iTcMKyunWZ
262
- # anMylNEQRBAu34LzB4TmdDttceItDBvuINXJIB1jKS3O7F5OyJP4IWGbNOsFxl7s
263
- # Wxq868nPzaw0QF+xembud8hIqGZXV59UWI4MK7dPpzDZVu7Ke13jrclPXuU15zHL
264
- # 2pNe3I6PgNq2kZhAkHnDeMe2scS1ahg4AxCN2NQ3pC4FfYj1gj4QkXCrVYJBMtfb
265
- # BHMqbpEBfCFM1LyuGwN1XXhm2ToxRJozQL8I11pJpMLmqaBn3aQnvKFPObURWBf3
266
- # JFxGj2T3wWmIdph2PVldQnaHiZdpekjw4KISG2aadMreSx7nDmOu5tTvkpI6nj3c
267
- # AORFJYm2mkQZK37AlLTSYW3rM9nF30sEAMx9HJXDj/chsrIRt7t/8tWMcCxBYKqx
268
- # YxhElRp2Yn72gLD76GSmM9GJB+G9t+ZDpBi4pncB4Q+UDCEdslQpJYls5Q5SUUd0
269
- # viastkF13nqsX40/ybzTQRESW+UQUOsxxcpyFiIJ33xMdT9j7CFfxCBRa2+xq4aL
270
- # T8LWRV+dIPyhHsXAj6KxfgommfXkaS+YHS312amyHeUbAgMBAAGjQjBAMA8GA1Ud
271
- # EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTs1+OC0nFdZEzf
272
- # Lmc/57qYrhwPTzANBgkqhkiG9w0BAQwFAAOCAgEAu2HZfalsvhfEkRvDoaIAjeNk
273
- # aA9Wz3eucPn9mkqZucl4XAwMX+TmFClWCzZJXURj4K2clhhmGyMNPXnpbWvWVPjS
274
- # PMFDQK4dUPVS/JA7u5iZaWvHwaeoaKQn3J35J64whbn2Z006Po9ZOSJTROvIXQPK
275
- # 7VB6fWIhCoDIc2bRoAVgX+iltKevqPdtNZx8WorWojiZ83iL9E3SIAveBO6Mm0eB
276
- # cg3AFDLvMFkuruBx8lbkapdvklBtlo1oepqyNhR6BvIkuQkRUNcIsbiJeoQjYUIp
277
- # 5aPNoiBB19GcZNnqJqGLFNdMGbJQQXE9P01wI4YMStyB0swylIQNCAmXHE/A7msg
278
- # dDDS4Dk0EIUhFQEI6FUy3nFJ2SgXUE3mvk3RdazQyvtBuEOlqtPDBURPLDab4vri
279
- # RbgjU2wGb2dVf0a1TD9uKFp5JtKkqGKX0h7i7UqLvBv9R0oN32dmfrJbQdA75PQ7
280
- # 9ARj6e/CVABRoIoqyc54zNXqhwQYs86vSYiv85KZtrPmYQ/ShQDnUBrkG5WdGaG5
281
- # nLGbsQAe79APT0JsyQq87kP6OnGlyE0mpTX9iV28hWIdMtKgK1TtmlfB2/oQzxm3
282
- # i0objwG2J5VT6LaJbVu8aNQj6ItRolb58KaAoNYes7wPD1N1KarqE3fk3oyBIa0H
283
- # EEcRrYc9B9F1vM/zZn4wggawMIIEmKADAgECAhAIrUCyYNKcTJ9ezam9k67ZMA0G
284
- # CSqGSIb3DQEBDAUAMGIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJ
285
- # bmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xITAfBgNVBAMTGERpZ2lDZXJ0
286
- # IFRydXN0ZWQgUm9vdCBHNDAeFw0yMTA0MjkwMDAwMDBaFw0zNjA0MjgyMzU5NTla
287
- # MGkxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5EaWdpQ2VydCwgSW5jLjFBMD8GA1UE
288
- # AxM4RGlnaUNlcnQgVHJ1c3RlZCBHNCBDb2RlIFNpZ25pbmcgUlNBNDA5NiBTSEEz
289
- # ODQgMjAyMSBDQTEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDVtC9C
290
- # 0CiteLdd1TlZG7GIQvUzjOs9gZdwxbvEhSYwn6SOaNhc9es0JAfhS0/TeEP0F9ce
291
- # 2vnS1WcaUk8OoVf8iJnBkcyBAz5NcCRks43iCH00fUyAVxJrQ5qZ8sU7H/Lvy0da
292
- # E6ZMswEgJfMQ04uy+wjwiuCdCcBlp/qYgEk1hz1RGeiQIXhFLqGfLOEYwhrMxe6T
293
- # SXBCMo/7xuoc82VokaJNTIIRSFJo3hC9FFdd6BgTZcV/sk+FLEikVoQ11vkunKoA
294
- # FdE3/hoGlMJ8yOobMubKwvSnowMOdKWvObarYBLj6Na59zHh3K3kGKDYwSNHR7Oh
295
- # D26jq22YBoMbt2pnLdK9RBqSEIGPsDsJ18ebMlrC/2pgVItJwZPt4bRc4G/rJvmM
296
- # 1bL5OBDm6s6R9b7T+2+TYTRcvJNFKIM2KmYoX7BzzosmJQayg9Rc9hUZTO1i4F4z
297
- # 8ujo7AqnsAMrkbI2eb73rQgedaZlzLvjSFDzd5Ea/ttQokbIYViY9XwCFjyDKK05
298
- # huzUtw1T0PhH5nUwjewwk3YUpltLXXRhTT8SkXbev1jLchApQfDVxW0mdmgRQRNY
299
- # mtwmKwH0iU1Z23jPgUo+QEdfyYFQc4UQIyFZYIpkVMHMIRroOBl8ZhzNeDhFMJlP
300
- # /2NPTLuqDQhTQXxYPUez+rbsjDIJAsxsPAxWEQIDAQABo4IBWTCCAVUwEgYDVR0T
301
- # AQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaDfg67Y7+F8Rhvv+YXsIiGX0TkIwHwYD
302
- # VR0jBBgwFoAU7NfjgtJxXWRM3y5nP+e6mK4cD08wDgYDVR0PAQH/BAQDAgGGMBMG
303
- # A1UdJQQMMAoGCCsGAQUFBwMDMHcGCCsGAQUFBwEBBGswaTAkBggrBgEFBQcwAYYY
304
- # aHR0cDovL29jc3AuZGlnaWNlcnQuY29tMEEGCCsGAQUFBzAChjVodHRwOi8vY2Fj
305
- # ZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRUcnVzdGVkUm9vdEc0LmNydDBDBgNV
306
- # HR8EPDA6MDigNqA0hjJodHRwOi8vY3JsMy5kaWdpY2VydC5jb20vRGlnaUNlcnRU
307
- # cnVzdGVkUm9vdEc0LmNybDAcBgNVHSAEFTATMAcGBWeBDAEDMAgGBmeBDAEEATAN
308
- # BgkqhkiG9w0BAQwFAAOCAgEAOiNEPY0Idu6PvDqZ01bgAhql+Eg08yy25nRm95Ry
309
- # sQDKr2wwJxMSnpBEn0v9nqN8JtU3vDpdSG2V1T9J9Ce7FoFFUP2cvbaF4HZ+N3HL
310
- # IvdaqpDP9ZNq4+sg0dVQeYiaiorBtr2hSBh+3NiAGhEZGM1hmYFW9snjdufE5Btf
311
- # Q/g+lP92OT2e1JnPSt0o618moZVYSNUa/tcnP/2Q0XaG3RywYFzzDaju4ImhvTnh
312
- # OE7abrs2nfvlIVNaw8rpavGiPttDuDPITzgUkpn13c5UbdldAhQfQDN8A+KVssIh
313
- # dXNSy0bYxDQcoqVLjc1vdjcshT8azibpGL6QB7BDf5WIIIJw8MzK7/0pNVwfiThV
314
- # 9zeKiwmhywvpMRr/LhlcOXHhvpynCgbWJme3kuZOX956rEnPLqR0kq3bPKSchh/j
315
- # wVYbKyP/j7XqiHtwa+aguv06P0WmxOgWkVKLQcBIhEuWTatEQOON8BUozu3xGFYH
316
- # Ki8QxAwIZDwzj64ojDzLj4gLDb879M4ee47vtevLt/B3E+bnKD+sEq6lLyJsQfmC
317
- # XBVmzGwOysWGw/YmMwwHS6DTBwJqakAwSEs0qFEgu60bhQjiWQ1tygVQK+pKHJ6l
318
- # /aCnHwZ05/LWUpD9r4VIIflXO7ScA+2GRfS0YW6/aOImYIbqyK+p/pQd52MbOoZW
319
- # eE4wggd3MIIFX6ADAgECAhAHHxQbizANJfMU6yMM0NHdMA0GCSqGSIb3DQEBCwUA
320
- # MGkxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5EaWdpQ2VydCwgSW5jLjFBMD8GA1UE
321
- # AxM4RGlnaUNlcnQgVHJ1c3RlZCBHNCBDb2RlIFNpZ25pbmcgUlNBNDA5NiBTSEEz
322
- # ODQgMjAyMSBDQTEwHhcNMjIwMTE3MDAwMDAwWhcNMjUwMTE1MjM1OTU5WjB8MQsw
323
- # CQYDVQQGEwJVUzEPMA0GA1UECBMGT3JlZ29uMRIwEAYDVQQHEwlCZWF2ZXJ0b24x
324
- # IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMSMwIQYDVQQDExpQ
325
- # eXRob24gU29mdHdhcmUgRm91bmRhdGlvbjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
326
- # ADCCAgoCggIBAKgc0BTT+iKbtK6f2mr9pNMUTcAJxKdsuOiSYgDFfwhjQy89koM7
327
- # uP+QV/gwx8MzEt3c9tLJvDccVWQ8H7mVsk/K+X+IufBLCgUi0GGAZUegEAeRlSXx
328
- # xhYScr818ma8EvGIZdiSOhqjYc4KnfgfIS4RLtZSrDFG2tN16yS8skFa3IHyvWdb
329
- # D9PvZ4iYNAS4pjYDRjT/9uzPZ4Pan+53xZIcDgjiTwOh8VGuppxcia6a7xCyKoOA
330
- # GjvCyQsj5223v1/Ig7Dp9mGI+nh1E3IwmyTIIuVHyK6Lqu352diDY+iCMpk9Zanm
331
- # SjmB+GMVs+H/gOiofjjtf6oz0ki3rb7sQ8fTnonIL9dyGTJ0ZFYKeb6BLA66d2GA
332
- # LwxZhLe5WH4Np9HcyXHACkppsE6ynYjTOd7+jN1PRJahN1oERzTzEiV6nCO1M3U1
333
- # HbPTGyq52IMFSBM2/07WTJSbOeXjvYR7aUxK9/ZkJiacl2iZI7IWe7JKhHohqKuc
334
- # eQNyOzxTakLcRkzynvIrk33R9YVqtB4L6wtFxhUjvDnQg16xot2KVPdfyPAWd81w
335
- # tZADmrUtsZ9qG79x1hBdyOl4vUtVPECuyhCxaw+faVjumapPUnwo8ygflJJ74J+B
336
- # Yxf6UuD7m8yzsfXWkdv52DjL74TxzuFTLHPyARWCSCAbzn3ZIly+qIqDAgMBAAGj
337
- # ggIGMIICAjAfBgNVHSMEGDAWgBRoN+Drtjv4XxGG+/5hewiIZfROQjAdBgNVHQ4E
338
- # FgQUt/1Teh2XDuUj2WW3siYWJgkZHA8wDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQM
339
- # MAoGCCsGAQUFBwMDMIG1BgNVHR8Ega0wgaowU6BRoE+GTWh0dHA6Ly9jcmwzLmRp
340
- # Z2ljZXJ0LmNvbS9EaWdpQ2VydFRydXN0ZWRHNENvZGVTaWduaW5nUlNBNDA5NlNI
341
- # QTM4NDIwMjFDQTEuY3JsMFOgUaBPhk1odHRwOi8vY3JsNC5kaWdpY2VydC5jb20v
342
- # RGlnaUNlcnRUcnVzdGVkRzRDb2RlU2lnbmluZ1JTQTQwOTZTSEEzODQyMDIxQ0Ex
343
- # LmNybDA+BgNVHSAENzA1MDMGBmeBDAEEATApMCcGCCsGAQUFBwIBFhtodHRwOi8v
344
- # d3d3LmRpZ2ljZXJ0LmNvbS9DUFMwgZQGCCsGAQUFBwEBBIGHMIGEMCQGCCsGAQUF
345
- # BzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wXAYIKwYBBQUHMAKGUGh0dHA6
346
- # Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFRydXN0ZWRHNENvZGVTaWdu
347
- # aW5nUlNBNDA5NlNIQTM4NDIwMjFDQTEuY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZI
348
- # hvcNAQELBQADggIBABxv4AeV/5ltkELHSC63fXAFYS5tadcWTiNc2rskrNLrfH1N
349
- # s0vgSZFoQxYBFKI159E8oQQ1SKbTEubZ/B9kmHPhprHya08+VVzxC88pOEvz68nA
350
- # 82oEM09584aILqYmj8Pj7h/kmZNzuEL7WiwFa/U1hX+XiWfLIJQsAHBla0i7QRF2
351
- # SIG # End signature block
 
spaces/BraydenMoore/MARCI-NFL-Betting/Source/Train/xgboost_ATS.py DELETED
@@ -1,73 +0,0 @@
1
- import xgboost as xgb
2
- import pandas as pd
3
- import pickle as pkl
4
- import numpy as np
5
- from tqdm import tqdm
6
- from IPython.display import clear_output
7
- from sklearn.metrics import accuracy_score
8
- from sklearn.model_selection import train_test_split
9
- import os
10
-
11
- current_directory = os.path.dirname(os.path.abspath(__file__))
12
- parent_directory = os.path.dirname(current_directory)
13
- data_directory = os.path.join(parent_directory, 'Data')
14
- model_directory = os.path.join(parent_directory, 'Models')
15
- pickle_directory = os.path.join(parent_directory, 'Pickles')
16
-
17
- file_path = os.path.join(data_directory, 'gbg_and_odds.csv')
18
- data = pd.read_csv(file_path).dropna()
19
-
20
- margin = data['Home-Team-Cover']
21
-
22
- data.drop(columns=['Home-Team-Win','Home-Team-Cover','Over','Season','home_team','away_team','game_date','Key','Home Score','Away Score','Home Odds Close','Away Odds Close','Home Winnings','Away Winnings', 'Home Odds', 'Away Odds'], inplace=True)
23
- features = [i for i in data.columns if i!='game_id']
24
- print(features)
25
- acc_results = []
26
-
27
- for x in tqdm(range(100)):
28
- X_train, X_test, y_train, y_test = train_test_split(data, margin, test_size=.1)
29
-
30
- train_games = X_train['game_id']
31
- test_games = X_test['game_id']
32
-
33
- X_train.drop(columns=['game_id'], inplace=True)
34
- X_test.drop(columns=['game_id'], inplace=True)
35
-
36
- train = xgb.DMatrix(X_train.astype(float).values, label=y_train)
37
- test = xgb.DMatrix(X_test.astype(float).values, label=y_test)
38
-
39
- param = {
40
- 'max_depth': 6,
41
- 'eta': 0.01,
42
- 'objective': 'multi:softprob',
43
- 'num_class': 3
44
- }
45
- epochs = 500
46
-
47
- model = xgb.train(param, train, epochs)
48
- predictions = model.predict(test)
49
- y = []
50
- for z in predictions:
51
- y.append(np.argmax(z))
52
-
53
- acc = round(accuracy_score(y_test, y)*100, 1)
54
- acc_results.append(acc)
55
- clear_output(wait=True)
56
- print(f"Best accuracy: {max(acc_results)}%")
57
-
58
- # only save results if they are the best so far
59
- if acc == max(acc_results):
60
- file_path = os.path.join(pickle_directory, 'train_games_ATS_no_odds.pkl')
61
- with open(file_path,'wb') as f:
62
- pkl.dump(train_games,f)
63
-
64
- file_path = os.path.join(pickle_directory, 'test_games_ATS_no_odds.pkl')
65
- with open(file_path,'wb') as f:
66
- pkl.dump(test_games,f)
67
-
68
- file_path = os.path.join(model_directory, f'xgboost_ATS_no_odds_{acc}%.json')
69
- model.save_model(file_path)
70
-
71
- importances = (model.get_score(importance_type='gain'))
72
- print(pd.DataFrame(zip(features,importances.values())).sort_values(1,ascending=False))
73
- print('Done')
 
spaces/CVH-vn1210/make_hair/minigpt4/runners/runner_base.py DELETED
@@ -1,658 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import datetime
9
- import json
10
- import logging
11
- import os
12
- import time
13
- from pathlib import Path
14
-
15
- import torch
16
- import torch.distributed as dist
17
- import webdataset as wds
18
- from minigpt4.common.dist_utils import (
19
- download_cached_file,
20
- get_rank,
21
- get_world_size,
22
- is_main_process,
23
- main_process,
24
- )
25
- from minigpt4.common.registry import registry
26
- from minigpt4.common.utils import is_url
27
- from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset
28
- from minigpt4.datasets.datasets.dataloader_utils import (
29
- IterLoader,
30
- MultiIterLoader,
31
- PrefetchLoader,
32
- )
33
- from torch.nn.parallel import DistributedDataParallel as DDP
34
- from torch.utils.data import DataLoader, DistributedSampler
35
-
36
-
37
- @registry.register_runner("runner_base")
38
- class RunnerBase:
39
- """
40
- A runner class to train and evaluate a model given a task and datasets.
41
-
42
- The runner uses pytorch distributed data parallel by default. Future release
43
- will support other distributed frameworks.
44
- """
45
-
46
- def __init__(self, cfg, task, model, datasets, job_id):
47
- self.config = cfg
48
- self.job_id = job_id
49
-
50
- self.task = task
51
- self.datasets = datasets
52
-
53
- self._model = model
54
-
55
- self._wrapped_model = None
56
- self._device = None
57
- self._optimizer = None
58
- self._scaler = None
59
- self._dataloaders = None
60
- self._lr_sched = None
61
-
62
- self.start_epoch = 0
63
-
64
- # self.setup_seeds()
65
- self.setup_output_dir()
66
-
67
- @property
68
- def device(self):
69
- if self._device is None:
70
- self._device = torch.device(self.config.run_cfg.device)
71
-
72
- return self._device
73
-
74
- @property
75
- def use_distributed(self):
76
- return self.config.run_cfg.distributed
77
-
78
- @property
79
- def model(self):
80
- """
81
- A property to get the DDP-wrapped model on the device.
82
- """
83
- # move model to device
84
- if self._model.device != self.device:
85
- self._model = self._model.to(self.device)
86
-
87
- # distributed training wrapper
88
- if self.use_distributed:
89
- if self._wrapped_model is None:
90
- self._wrapped_model = DDP(
91
- self._model, device_ids=[self.config.run_cfg.gpu]
92
- )
93
- else:
94
- self._wrapped_model = self._model
95
-
96
- return self._wrapped_model
97
-
98
- @property
99
- def optimizer(self):
100
- # TODO make optimizer class and configurations
101
- if self._optimizer is None:
102
- num_parameters = 0
103
- p_wd, p_non_wd = [], []
104
- for n, p in self.model.named_parameters():
105
- if not p.requires_grad:
106
- continue # frozen weights
107
- print(n)
108
- if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n:
109
- p_non_wd.append(p)
110
- else:
111
- p_wd.append(p)
112
- num_parameters += p.data.nelement()
113
- logging.info("number of trainable parameters: %d" % num_parameters)
114
- optim_params = [
115
- {
116
- "params": p_wd,
117
- "weight_decay": float(self.config.run_cfg.weight_decay),
118
- },
119
- {"params": p_non_wd, "weight_decay": 0},
120
- ]
121
- beta2 = self.config.run_cfg.get("beta2", 0.999)
122
- self._optimizer = torch.optim.AdamW(
123
- optim_params,
124
- lr=float(self.config.run_cfg.init_lr),
125
- weight_decay=float(self.config.run_cfg.weight_decay),
126
- betas=(0.9, beta2),
127
- )
128
-
129
- return self._optimizer
130
-
131
- @property
132
- def scaler(self):
133
- amp = self.config.run_cfg.get("amp", False)
134
-
135
- if amp:
136
- if self._scaler is None:
137
- self._scaler = torch.cuda.amp.GradScaler()
138
-
139
- return self._scaler
140
-
141
- @property
142
- def lr_scheduler(self):
143
- """
144
- A property to get and create learning rate scheduler by split just in need.
145
- """
146
- if self._lr_sched is None:
147
- lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
148
-
149
- # max_epoch = self.config.run_cfg.max_epoch
150
- max_epoch = self.max_epoch
151
- # min_lr = self.config.run_cfg.min_lr
152
- min_lr = self.min_lr
153
- # init_lr = self.config.run_cfg.init_lr
154
- init_lr = self.init_lr
155
-
156
- # optional parameters
157
- decay_rate = self.config.run_cfg.get("lr_decay_rate", None)
158
- warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1)
159
- warmup_steps = self.config.run_cfg.get("warmup_steps", 0)
160
- iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None)
161
-
162
- if iters_per_epoch is None:
163
- try:
164
- iters_per_epoch = len(self.dataloaders['train'])
165
- except (AttributeError, TypeError):
166
- iters_per_epoch = 10000
167
-
168
- self._lr_sched = lr_sched_cls(
169
- optimizer=self.optimizer,
170
- max_epoch=max_epoch,
171
- iters_per_epoch=iters_per_epoch,
172
- min_lr=min_lr,
173
- init_lr=init_lr,
174
- decay_rate=decay_rate,
175
- warmup_start_lr=warmup_start_lr,
176
- warmup_steps=warmup_steps,
177
- )
178
-
179
- return self._lr_sched
180
-
181
- @property
182
- def dataloaders(self) -> dict:
183
- """
184
- A property to get and create dataloaders by split just in need.
185
-
186
- If no train_dataset_ratio is provided, concatenate map-style datasets and
187
- chain wds.DataPipe datasets separately. Training set becomes a tuple
188
- (ConcatDataset, ChainDataset), both are optional but at least one of them is
189
- required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
190
-
191
- If train_dataset_ratio is provided, create a MultiIterLoader to sample
192
- each dataset by ratios during training.
193
-
194
- Currently do not support multiple datasets for validation and test.
195
-
196
- Returns:
197
- dict: {split_name: (tuples of) dataloader}
198
- """
199
- if self._dataloaders is None:
200
-
201
- # concatenate map-style datasets and chain wds.DataPipe datasets separately
202
- # training set becomes a tuple (ConcatDataset, ChainDataset), both are
203
- # optional but at least one of them is required. The resultant ConcatDataset
204
- # and ChainDataset will be sampled evenly.
205
- logging.info(
206
- "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
207
- )
208
-
209
- datasets = reorg_datasets_by_split(self.datasets)
210
- self.datasets = datasets
211
- # self.datasets = concat_datasets(datasets)
212
-
213
- # print dataset statistics after concatenation/chaining
214
- for split_name in self.datasets:
215
- if isinstance(self.datasets[split_name], tuple) or isinstance(
216
- self.datasets[split_name], list
217
- ):
218
- # mixed wds.DataPipeline and torch.utils.data.Dataset
219
- num_records = sum(
220
- [
221
- len(d)
222
- if not type(d) in [wds.DataPipeline, ChainDataset]
223
- else 0
224
- for d in self.datasets[split_name]
225
- ]
226
- )
227
-
228
- else:
229
- if hasattr(self.datasets[split_name], "__len__"):
230
- # a single map-style dataset
231
- num_records = len(self.datasets[split_name])
232
- else:
233
- # a single wds.DataPipeline
234
- num_records = -1
235
- logging.info(
236
- "Only a single wds.DataPipeline dataset, no __len__ attribute."
237
- )
238
-
239
- if num_records >= 0:
240
- logging.info(
241
- "Loaded {} records for {} split from the dataset.".format(
242
- num_records, split_name
243
- )
244
- )
245
-
246
- # create dataloaders
247
- split_names = sorted(self.datasets.keys())
248
-
249
- datasets = [self.datasets[split] for split in split_names]
250
- is_trains = [split in self.train_splits for split in split_names]
251
-
252
- batch_sizes = [
253
- self.config.run_cfg.batch_size_train
254
- if split == "train"
255
- else self.config.run_cfg.batch_size_eval
256
- for split in split_names
257
- ]
258
-
259
- collate_fns = []
260
- for dataset in datasets:
261
- if isinstance(dataset, tuple) or isinstance(dataset, list):
262
- collate_fns.append([getattr(d, "collater", None) for d in dataset])
263
- else:
264
- collate_fns.append(getattr(dataset, "collater", None))
265
-
266
- dataloaders = self.create_loaders(
267
- datasets=datasets,
268
- num_workers=self.config.run_cfg.num_workers,
269
- batch_sizes=batch_sizes,
270
- is_trains=is_trains,
271
- collate_fns=collate_fns,
272
- )
273
-
274
- self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
275
-
276
- return self._dataloaders
277
-
278
- @property
279
- def cuda_enabled(self):
280
- return self.device.type == "cuda"
281
-
282
- @property
283
- def max_epoch(self):
284
- return int(self.config.run_cfg.max_epoch)
285
-
286
- @property
287
- def log_freq(self):
288
- log_freq = self.config.run_cfg.get("log_freq", 50)
289
- return int(log_freq)
290
-
291
- @property
292
- def init_lr(self):
293
- return float(self.config.run_cfg.init_lr)
294
-
295
- @property
296
- def min_lr(self):
297
- return float(self.config.run_cfg.min_lr)
298
-
299
- @property
300
- def accum_grad_iters(self):
301
- return int(self.config.run_cfg.get("accum_grad_iters", 1))
302
-
303
- @property
304
- def valid_splits(self):
305
- valid_splits = self.config.run_cfg.get("valid_splits", [])
306
-
307
- if len(valid_splits) == 0:
308
- logging.info("No validation splits found.")
309
-
310
- return valid_splits
311
-
312
- @property
313
- def test_splits(self):
314
- test_splits = self.config.run_cfg.get("test_splits", [])
315
-
316
- return test_splits
317
-
318
- @property
319
- def train_splits(self):
320
- train_splits = self.config.run_cfg.get("train_splits", [])
321
-
322
- if len(train_splits) == 0:
323
- logging.info("Empty train splits.")
324
-
325
- return train_splits
326
-
327
- @property
328
- def evaluate_only(self):
329
- """
330
- Set to True to skip training.
331
- """
332
- return self.config.run_cfg.evaluate
333
-
334
- @property
335
- def use_dist_eval_sampler(self):
336
- return self.config.run_cfg.get("use_dist_eval_sampler", True)
337
-
338
- @property
339
- def resume_ckpt_path(self):
340
- return self.config.run_cfg.get("resume_ckpt_path", None)
341
-
342
- @property
343
- def train_loader(self):
344
- train_dataloader = self.dataloaders["train"]
345
-
346
- return train_dataloader
347
-
348
- def setup_output_dir(self):
349
- lib_root = Path(registry.get_path("library_root"))
350
-
351
- output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id
352
- result_dir = output_dir / "result"
353
-
354
- output_dir.mkdir(parents=True, exist_ok=True)
355
- result_dir.mkdir(parents=True, exist_ok=True)
356
-
357
- registry.register_path("result_dir", str(result_dir))
358
- registry.register_path("output_dir", str(output_dir))
359
-
360
- self.result_dir = result_dir
361
- self.output_dir = output_dir
362
-
363
- def train(self):
364
- start_time = time.time()
365
- best_agg_metric = 0
366
- best_epoch = 0
367
-
368
- self.log_config()
369
-
370
- # resume from checkpoint if specified
371
- if not self.evaluate_only and self.resume_ckpt_path is not None:
372
- self._load_checkpoint(self.resume_ckpt_path)
373
-
374
- for cur_epoch in range(self.start_epoch, self.max_epoch):
375
- # training phase
376
- if not self.evaluate_only:
377
- logging.info("Start training")
378
- train_stats = self.train_epoch(cur_epoch)
379
- self.log_stats(split_name="train", stats=train_stats)
380
-
381
- # evaluation phase
382
- if len(self.valid_splits) > 0:
383
- for split_name in self.valid_splits:
384
- logging.info("Evaluating on {}.".format(split_name))
385
-
386
- val_log = self.eval_epoch(
387
- split_name=split_name, cur_epoch=cur_epoch
388
- )
389
- if val_log is not None:
390
- if is_main_process():
391
- assert (
392
- "agg_metrics" in val_log
393
- ), "No agg_metrics found in validation log."
394
-
395
- agg_metrics = val_log["agg_metrics"]
396
- if agg_metrics > best_agg_metric and split_name == "val":
397
- best_epoch, best_agg_metric = cur_epoch, agg_metrics
398
-
399
- self._save_checkpoint(cur_epoch, is_best=True)
400
-
401
- val_log.update({"best_epoch": best_epoch})
402
- self.log_stats(val_log, split_name)
403
-
404
- else:
405
- # if no validation split is provided, we just save the checkpoint at the end of each epoch.
406
- if not self.evaluate_only:
407
- self._save_checkpoint(cur_epoch, is_best=False)
408
-
409
- if self.evaluate_only:
410
- break
411
-
412
- if self.config.run_cfg.distributed:
413
- dist.barrier()
414
-
415
- # testing phase
416
- test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch
417
- self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
418
-
419
- total_time = time.time() - start_time
420
- total_time_str = str(datetime.timedelta(seconds=int(total_time)))
421
- logging.info("Training time {}".format(total_time_str))
422
-
423
- def evaluate(self, cur_epoch="best", skip_reload=False):
424
- test_logs = dict()
425
-
426
- if len(self.test_splits) > 0:
427
- for split_name in self.test_splits:
428
- test_logs[split_name] = self.eval_epoch(
429
- split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload
430
- )
431
-
432
- return test_logs
433
-
434
- def train_epoch(self, epoch):
435
- # train
436
- self.model.train()
437
-
438
- return self.task.train_epoch(
439
- epoch=epoch,
440
- model=self.model,
441
- data_loader=self.train_loader,
442
- optimizer=self.optimizer,
443
- scaler=self.scaler,
444
- lr_scheduler=self.lr_scheduler,
445
- cuda_enabled=self.cuda_enabled,
446
- log_freq=self.log_freq,
447
- accum_grad_iters=self.accum_grad_iters,
448
- )
449
-
450
- @torch.no_grad()
451
- def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
452
- """
453
- Evaluate the model on a given split.
454
-
455
- Args:
456
- split_name (str): name of the split to evaluate on.
457
- cur_epoch (int): current epoch.
458
- skip_reload_best (bool): whether to skip reloading the best checkpoint.
459
- During training, we will reload the best checkpoint for validation.
460
- During testing, we will use provided weights and skip reloading the best checkpoint .
461
- """
462
- data_loader = self.dataloaders.get(split_name, None)
463
- assert data_loader, "data_loader for split {} is None.".format(split_name)
464
-
465
- # TODO In validation, you need to compute loss as well as metrics
466
- # TODO consider moving to model.before_evaluation()
467
- model = self.unwrap_dist_model(self.model)
468
- if not skip_reload and cur_epoch == "best":
469
- model = self._reload_best_model(model)
470
- model.eval()
471
-
472
- self.task.before_evaluation(
473
- model=model,
474
- dataset=self.datasets[split_name],
475
- )
476
- results = self.task.evaluation(model, data_loader)
477
-
478
- if results is not None:
479
- return self.task.after_evaluation(
480
- val_result=results,
481
- split_name=split_name,
482
- epoch=cur_epoch,
483
- )
484
-
485
- def unwrap_dist_model(self, model):
486
- if self.use_distributed:
487
- return model.module
488
- else:
489
- return model
490
-
491
- def create_loaders(
492
- self,
493
- datasets,
494
- num_workers,
495
- batch_sizes,
496
- is_trains,
497
- collate_fns,
498
- dataset_ratios=None,
499
- ):
500
- """
501
- Create dataloaders for training and validation.
502
- """
503
-
504
- def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
505
- # create a single dataloader for each split
506
- if isinstance(dataset, ChainDataset) or isinstance(
507
- dataset, wds.DataPipeline
508
- ):
509
- # wds.WebdDataset instance are chained together
510
- # webdataset.DataPipeline has its own sampler and collate_fn
511
- loader = iter(
512
- DataLoader(
513
- dataset,
514
- batch_size=bsz,
515
- num_workers=num_workers,
516
- pin_memory=True,
517
- )
518
- )
519
- else:
520
- # map-style dataset are concatenated together
521
- # setup distributed sampler
522
- if self.use_distributed:
523
- sampler = DistributedSampler(
524
- dataset,
525
- shuffle=is_train,
526
- num_replicas=get_world_size(),
527
- rank=get_rank(),
528
- )
529
- if not self.use_dist_eval_sampler:
530
- # e.g. retrieval evaluation
531
- sampler = sampler if is_train else None
532
- else:
533
- sampler = None
534
-
535
- loader = DataLoader(
536
- dataset,
537
- batch_size=bsz,
538
- num_workers=num_workers,
539
- pin_memory=True,
540
- sampler=sampler,
541
- shuffle=sampler is None and is_train,
542
- collate_fn=collate_fn,
543
- drop_last=True if is_train else False,
544
- )
545
- loader = PrefetchLoader(loader)
546
-
547
- if is_train:
548
- loader = IterLoader(loader, use_distributed=self.use_distributed)
549
-
550
- return loader
551
-
552
- loaders = []
553
-
554
- for dataset, bsz, is_train, collate_fn in zip(
555
- datasets, batch_sizes, is_trains, collate_fns
556
- ):
557
- if isinstance(dataset, list) or isinstance(dataset, tuple):
558
- if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None:
559
- dataset_ratios = [d.sample_ratio for d in dataset]
560
- loader = MultiIterLoader(
561
- loaders=[
562
- _create_loader(d, num_workers, bsz, is_train, collate_fn[i])
563
- for i, d in enumerate(dataset)
564
- ],
565
- ratios=dataset_ratios,
566
- )
567
- else:
568
- loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
569
-
570
- loaders.append(loader)
571
-
572
- return loaders
573
-
574
- @main_process
575
- def _save_checkpoint(self, cur_epoch, is_best=False):
576
- """
577
- Save the checkpoint at the current epoch.
578
- """
579
- model_no_ddp = self.unwrap_dist_model(self.model)
580
- param_grad_dic = {
581
- k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()
582
- }
583
- state_dict = model_no_ddp.state_dict()
584
- for k in list(state_dict.keys()):
585
- if k in param_grad_dic.keys() and not param_grad_dic[k]:
586
- # delete parameters that do not require gradient
587
- del state_dict[k]
588
- save_obj = {
589
- "model": state_dict,
590
- "optimizer": self.optimizer.state_dict(),
591
- "config": self.config.to_dict(),
592
- "scaler": self.scaler.state_dict() if self.scaler else None,
593
- "epoch": cur_epoch,
594
- }
595
- save_to = os.path.join(
596
- self.output_dir,
597
- "checkpoint_{}.pth".format("best" if is_best else cur_epoch),
598
- )
599
- logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to))
600
- torch.save(save_obj, save_to)
601
-
602
- def _reload_best_model(self, model):
603
- """
604
- Load the best checkpoint for evaluation.
605
- """
606
- checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth")
607
-
608
- logging.info("Loading checkpoint from {}.".format(checkpoint_path))
609
- checkpoint = torch.load(checkpoint_path, map_location="cpu")
610
- try:
611
- model.load_state_dict(checkpoint["model"])
612
- except RuntimeError as e:
613
- logging.warning(
614
- """
615
- Key mismatch when loading checkpoint. This is expected if only part of the model is saved.
616
- Trying to load the model with strict=False.
617
- """
618
- )
619
- model.load_state_dict(checkpoint["model"], strict=False)
620
- return model
621
-
622
- def _load_checkpoint(self, url_or_filename):
623
- """
624
- Resume from a checkpoint.
625
- """
626
- if is_url(url_or_filename):
627
- cached_file = download_cached_file(
628
- url_or_filename, check_hash=False, progress=True
629
- )
630
- checkpoint = torch.load(cached_file, map_location=self.device, strict=False)
631
- elif os.path.isfile(url_or_filename):
632
- checkpoint = torch.load(url_or_filename, map_location=self.device, strict=False)
633
- else:
634
- raise RuntimeError("checkpoint url or path is invalid")
635
-
636
- state_dict = checkpoint["model"]
637
- self.unwrap_dist_model(self.model).load_state_dict(state_dict)
638
-
639
- self.optimizer.load_state_dict(checkpoint["optimizer"])
640
- if self.scaler and "scaler" in checkpoint:
641
- self.scaler.load_state_dict(checkpoint["scaler"])
642
-
643
- self.start_epoch = checkpoint["epoch"] + 1
644
- logging.info("Resume checkpoint from {}".format(url_or_filename))
645
-
646
- @main_process
647
- def log_stats(self, stats, split_name):
648
- if isinstance(stats, dict):
649
- log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}}
650
- with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
651
- f.write(json.dumps(log_stats) + "\n")
652
- elif isinstance(stats, list):
653
- pass
654
-
655
- @main_process
656
- def log_config(self):
657
- with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
658
- f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
 
spaces/CVPR/LIVE/thrust/thrust/detail/integer_traits.h DELETED
@@ -1,132 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <limits>
21
- #include <limits.h>
22
-
23
- namespace thrust
24
- {
25
-
26
- namespace detail
27
- {
28
-
29
- template<typename T>
30
- class integer_traits
31
- {
32
- public:
33
- static const bool is_integral = false;
34
- };
35
-
36
- template<typename T, T min_val, T max_val>
37
- class integer_traits_base
38
- {
39
- public:
40
- static const bool is_integral = true;
41
- static const T const_min = min_val;
42
- static const T const_max = max_val;
43
- };
44
-
45
-
46
- template<>
47
- class integer_traits<bool>
48
- : public std::numeric_limits<bool>,
49
- public integer_traits_base<bool, false, true>
50
- {};
51
-
52
-
53
- template<>
54
- class integer_traits<char>
55
- : public std::numeric_limits<char>,
56
- public integer_traits_base<char, CHAR_MIN, CHAR_MAX>
57
- {};
58
-
59
-
60
- template<>
61
- class integer_traits<signed char>
62
- : public std::numeric_limits<signed char>,
63
- public integer_traits_base<signed char, SCHAR_MIN, SCHAR_MAX>
64
- {};
65
-
66
-
67
- template<>
68
- class integer_traits<unsigned char>
69
- : public std::numeric_limits<unsigned char>,
70
- public integer_traits_base<unsigned char, 0, UCHAR_MAX>
71
- {};
72
-
73
-
74
- template<>
75
- class integer_traits<short>
76
- : public std::numeric_limits<short>,
77
- public integer_traits_base<short, SHRT_MIN, SHRT_MAX>
78
- {};
79
-
80
-
81
- template<>
82
- class integer_traits<unsigned short>
83
- : public std::numeric_limits<unsigned short>,
84
- public integer_traits_base<unsigned short, 0, USHRT_MAX>
85
- {};
86
-
87
-
88
- template<>
89
- class integer_traits<int>
90
- : public std::numeric_limits<int>,
91
- public integer_traits_base<int, INT_MIN, INT_MAX>
92
- {};
93
-
94
-
95
- template<>
96
- class integer_traits<unsigned int>
97
- : public std::numeric_limits<unsigned int>,
98
- public integer_traits_base<unsigned int, 0, UINT_MAX>
99
- {};
100
-
101
-
102
- template<>
103
- class integer_traits<long>
104
- : public std::numeric_limits<long>,
105
- public integer_traits_base<long, LONG_MIN, LONG_MAX>
106
- {};
107
-
108
-
109
- template<>
110
- class integer_traits<unsigned long>
111
- : public std::numeric_limits<unsigned long>,
112
- public integer_traits_base<unsigned long, 0, ULONG_MAX>
113
- {};
114
-
115
-
116
- template<>
117
- class integer_traits<long long>
118
- : public std::numeric_limits<long long>,
119
- public integer_traits_base<long long, LLONG_MIN, LLONG_MAX>
120
- {};
121
-
122
-
123
- template<>
124
- class integer_traits<unsigned long long>
125
- : public std::numeric_limits<unsigned long long>,
126
- public integer_traits_base<unsigned long long, 0, ULLONG_MAX>
127
- {};
128
-
129
- } // end detail
130
-
131
- } // end thrust
132
-
 
spaces/CVPR/LIVE/thrust/thrust/future.h DELETED
@@ -1,179 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file thrust/future.h
18
- * \brief `thrust::future`, an asynchronous value type.
19
- */
20
-
21
- #pragma once
22
-
23
- #include <thrust/detail/config.h>
24
- #include <thrust/detail/cpp11_required.h>
25
- #include <thrust/detail/modern_gcc_required.h>
26
-
27
- #if THRUST_CPP_DIALECT >= 2011 && !defined(THRUST_LEGACY_GCC)
28
-
29
- #include <thrust/execution_policy.h>
30
- #include <thrust/detail/static_assert.h>
31
-
32
- #include <utility>
33
-
34
- /*
35
- // #include the host system's pointer.h header.
36
- #define __THRUST_HOST_SYSTEM_POINTER_HEADER <__THRUST_HOST_SYSTEM_ROOT/pointer.h>
37
- #include __THRUST_HOST_SYSTEM_POINTER_HEADER
38
- #undef __THRUST_HOST_SYSTEM_POINTER_HEADER
39
- */
40
-
41
- // #include the device system's pointer.h header.
42
- #define __THRUST_DEVICE_SYSTEM_POINTER_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/pointer.h>
43
- #include __THRUST_DEVICE_SYSTEM_POINTER_HEADER
44
- #undef __THRUST_DEVICE_SYSTEM_POINTER_HEADER
45
-
46
- /*
47
- // #include the host system's future.h header.
48
- #define __THRUST_HOST_SYSTEM_FUTURE_HEADER <__THRUST_HOST_SYSTEM_ROOT/future.h>
49
- #include __THRUST_HOST_SYSTEM_FUTURE_HEADER
50
- #undef __THRUST_HOST_SYSTEM_FUTURE_HEADER
51
- */
52
-
53
- // #include the device system's future.h header.
54
- #define __THRUST_DEVICE_SYSTEM_FUTURE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/future.h>
55
- #include __THRUST_DEVICE_SYSTEM_FUTURE_HEADER
56
- #undef __THRUST_DEVICE_SYSTEM_FUTURE_HEADER
57
-
58
- namespace thrust
59
- {
60
-
61
- ///////////////////////////////////////////////////////////////////////////////
62
-
63
- // `select_unique_(future|event)_type` is a hook for choosing the
64
- // `unique_eager_event`/`unique_eager_future` type for a system. `decltype` is
65
- // used to determine the return type of an ADL call to
66
- // `select_unique_eager_(future|event)_type(system)`; that return type should
67
- // be the correct event/future type for `system`. Overloads should only be
68
- // declared, not defined.
69
-
70
- namespace unimplemented
71
- {
72
-
73
- struct no_unique_eager_event_type_found {};
74
-
75
- inline __host__
76
- no_unique_eager_event_type_found
77
- unique_eager_event_type(...) noexcept;
78
-
79
- struct no_unique_eager_future_type_found {};
80
-
81
- template <typename T>
82
- __host__
83
- no_unique_eager_future_type_found
84
- unique_eager_future_type(...) noexcept;
85
-
86
- } // namespace unimplemented
87
-
88
- namespace unique_eager_event_type_detail
89
- {
90
-
91
- using unimplemented::unique_eager_event_type;
92
-
93
- template <typename System>
94
- using select = decltype(
95
- unique_eager_event_type(std::declval<System>())
96
- );
97
-
98
- } // namespace unique_eager_event_type_detail
99
-
100
- namespace unique_eager_future_type_detail
101
- {
102
-
103
- using unimplemented::unique_eager_future_type;
104
-
105
- template <typename System, typename T>
106
- using select = decltype(
107
- unique_eager_future_type<T>(std::declval<System>())
108
- );
109
-
110
- } // namespace unique_eager_future_type_detail
111
-
112
- ///////////////////////////////////////////////////////////////////////////////
113
-
114
- template <typename System>
115
- using unique_eager_event = unique_eager_event_type_detail::select<System>;
116
-
117
- template <typename System>
118
- using event = unique_eager_event<System>;
119
-
120
- ///////////////////////////////////////////////////////////////////////////////
121
-
122
- template <typename System, typename T>
123
- using unique_eager_future = unique_eager_future_type_detail::select<System, T>;
124
-
125
- template <typename System, typename T>
126
- using future = unique_eager_future<System, T>;
127
-
128
- /*
129
- ///////////////////////////////////////////////////////////////////////////////
130
-
131
- using host_unique_eager_event = unique_eager_event_type_detail::select<
132
- thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag
133
- >;
134
- using host_event = host_unique_eager_event;
135
-
136
- ///////////////////////////////////////////////////////////////////////////////
137
-
138
- template <typename T>
139
- using host_unique_eager_future = unique_eager_future_type_detail::select<
140
- thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag, T
141
- >;
142
- template <typename T>
143
- using host_future = host_unique_eager_future<T>;
144
- */
145
-
146
- ///////////////////////////////////////////////////////////////////////////////
147
-
148
- using device_unique_eager_event = unique_eager_event_type_detail::select<
149
- thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag
150
- >;
151
-
152
- using device_event = device_unique_eager_event;
153
-
154
- ///////////////////////////////////////////////////////////////////////////////
155
-
156
- template <typename T>
157
- using device_unique_eager_future = unique_eager_future_type_detail::select<
158
- thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag, T
159
- >;
160
-
161
- template <typename T>
162
- using device_future = device_unique_eager_future<T>;
163
-
164
- ///////////////////////////////////////////////////////////////////////////////
165
-
166
- struct new_stream_t final {};
167
-
168
- THRUST_INLINE_CONSTANT new_stream_t new_stream{};
169
-
170
- ///////////////////////////////////////////////////////////////////////////////
171
-
172
- using thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::when_all;
173
-
174
- ///////////////////////////////////////////////////////////////////////////////
175
-
176
- } // end namespace thrust
177
-
178
- #endif
179
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/guarded_driver_types.h DELETED
@@ -1,63 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include <driver_types.h> without causing
22
- // warnings from redefinitions of __host__ and __device__.
23
- // carefully save their definitions and restore them
24
- // can't tell exactly when push_macro & pop_macro were introduced to gcc; assume 4.5.0
25
-
26
-
27
- #if !defined(__GNUC__) || ((10000 * __GNUC__ + 100 * __GNUC_MINOR__ + __GNUC_PATCHLEVEL__) >= 40500)
28
- # ifdef __host__
29
- # pragma push_macro("__host__")
30
- # undef __host__
31
- # define THRUST_HOST_NEEDS_RESTORATION
32
- # endif
33
- # ifdef __device__
34
- # pragma push_macro("__device__")
35
- # undef __device__
36
- # define THRUST_DEVICE_NEEDS_RESTORATION
37
- # endif
38
- #else // GNUC pre 4.5.0
39
- # if !defined(__DRIVER_TYPES_H__)
40
- # ifdef __host__
41
- # undef __host__
42
- # endif
43
- # ifdef __device__
44
- # undef __device__
45
- # endif
46
- # endif // __DRIVER_TYPES_H__
47
- #endif // __GNUC__
48
-
49
-
50
- #include <driver_types.h>
51
-
52
-
53
- #if !defined(__GNUC__) || ((10000 * __GNUC__ + 100 * __GNUC_MINOR__ + __GNUC_PATCHLEVEL__) >= 40500)
54
- # ifdef THRUST_HOST_NEEDS_RESTORATION
55
- # pragma pop_macro("__host__")
56
- # undef THRUST_HOST_NEEDS_RESTORATION
57
- # endif
58
- # ifdef THRUST_DEVICE_NEEDS_RESTORATION
59
- # pragma pop_macro("__device__")
60
- # undef THRUST_DEVICE_NEEDS_RESTORATION
61
- # endif
62
- #endif // __GNUC__
63
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/stable_radix_sort.h DELETED
@@ -1,56 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/detail/sequential/execution_policy.h>
21
-
22
- namespace thrust
23
- {
24
- namespace system
25
- {
26
- namespace detail
27
- {
28
- namespace sequential
29
- {
30
-
31
-
32
- template<typename DerivedPolicy,
33
- typename RandomAccessIterator>
34
- __host__ __device__
35
- void stable_radix_sort(sequential::execution_policy<DerivedPolicy> &exec,
36
- RandomAccessIterator begin,
37
- RandomAccessIterator end);
38
-
39
-
40
- template<typename DerivedPolicy,
41
- typename RandomAccessIterator1,
42
- typename RandomAccessIterator2>
43
- __host__ __device__
44
- void stable_radix_sort_by_key(sequential::execution_policy<DerivedPolicy> &exec,
45
- RandomAccessIterator1 keys_begin,
46
- RandomAccessIterator1 keys_end,
47
- RandomAccessIterator2 values_begin);
48
-
49
-
50
- } // end namespace sequential
51
- } // end namespace detail
52
- } // end namespace system
53
- } // end namespace thrust
54
-
55
- #include <thrust/system/detail/sequential/stable_radix_sort.inl>
56
-
 
spaces/Chaitanya01/InvestingPlatform/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Wizards Streamlit App
3
- emoji: 👀
4
- colorFrom: indigo
5
- colorTo: yellow
6
- sdk: streamlit
7
- sdk_version: 1.10.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference