Commit 9d551d1
Parent(s): b063d89
Update parquet files (step 62 of 397)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar Whats New and Whats Improved in This Update.md +0 -144
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cutmaster2dprov1331fullcrackserialkeygenfree The Benefits and Features of This Powerful Software.md +0 -21
- spaces/1gistliPinn/ChatGPT4/Examples/Altiumfiletypenotrecognized.md +0 -54
- spaces/1gistliPinn/ChatGPT4/Examples/Autoclosets 80 Con Serial.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/BBE.Sonic.Sweet.Bundle.VST.RTAS.v1.0-AiR R33p Setup Free.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Brawlhalla Como Conseguir MonedasMammoth Glory Coins UPDATED.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Durgasaptashatibeejmantrasadhanapdf35 [UPD].md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/El Kulubud Daria Mecmuatul-Ahzabn Dzeltme ve Snflandrmasyla Oluturulan Du Kitab PDF ndir.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK for Android 6.0 The Best Way to Enjoy Retro Games.md +0 -228
- spaces/1phancelerku/anime-remove-background/Download Age of Conquest IV and Create Your Own Custom Maps and Scenarios.md +0 -190
- spaces/1toTree/lora_test/env.py +0 -13
- spaces/2023Liu2023/bingo/src/components/chat-panel.tsx +0 -153
- spaces/232labs/VToonify/vtoonify/model/stylegan/op/readme.md +0 -12
- spaces/2ndelement/voicevox/voicevox_engine/downloadable_library.py +0 -86
- spaces/777DUKE/Ballin/README.md +0 -10
- spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/msd.py +0 -126
- spaces/AIConsultant/MusicGen/audiocraft/optim/cosine_lr_scheduler.py +0 -48
- spaces/AONYLMR/White-box-Cartoonization/README.md +0 -15
- spaces/Ababababababbababa/Ashaar/poetry_diacritizer/trainer.py +0 -447
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/filedropzone/FileDropZone.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/LayoutChildren.js +0 -60
- spaces/AlexWang/lama/bin/paper_runfiles/generate_test_celeba-hq.sh +0 -17
- spaces/AlgoveraAI/web3-wallet/app.py +0 -106
- spaces/Aloento/9Nine-PITS/yin.py +0 -165
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/inpaint.md +0 -75
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py +0 -265
- spaces/Andy1621/uniformer_image_detection/configs/pafpn/README.md +0 -26
- spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py +0 -95
- spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/README.md +0 -420
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/gui/ui_model.py +0 -290
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/demo/create_coco_dataset.py +0 -83
- spaces/Audio-AGI/WavJourney/pipeline.py +0 -229
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/config.py +0 -50
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/custom_dataset_mapper.py +0 -149
- spaces/AzumaSeren100/XuanShen-Bert-VITS2/README.md +0 -17
- spaces/Benson/text-generation/Examples/Apkue.md +0 -86
- spaces/BetterAPI/BetterChat_new/src/lib/types/Settings.ts +0 -13
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py +0 -261
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_entry_points.py +0 -86
- spaces/Biliovo/anime-remove-background/README.md +0 -14
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/demo/predictor.py +0 -220
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/batch_norm.py +0 -237
- spaces/CVPR/LIVE/cdf.h +0 -29
- spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/minimum_type.h +0 -162
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/transform_scan.h +0 -22
- spaces/ChandraMohanNayal/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md +0 -40
- spaces/Chomkwoy/Nilkessye/image_text_align.py +0 -111
- spaces/Comet/txt2im-models/comet.py +0 -76
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar Whats New and Whats Improved in This Update.md
DELETED
@@ -1,144 +0,0 @@
- <br />
- <br>- Overview: What is the 2018.0.2 version and what are its improvements and fixes<br>- Download: How to download and install the full .rar file | | H2: What is AutoCAD Civil 3D and what are its features and benefits | - Definition: A software for civil engineering design and documentation<br>- Features: A list of some of the main features of AutoCAD Civil 3D such as dynamic models, object-oriented environment, BIM tools, etc.<br>- Benefits: A list of some of the benefits of using AutoCAD Civil 3D such as efficiency, accuracy, collaboration, etc. | | H2: What is the 2018.0.2 version and what are its improvements and fixes | - Release date: When was the 2018.0.2 version released and by whom<br>- Improvements: A list of some of the improvements made in the 2018.0.2 version such as performance, stability, compatibility, etc.<br>- Fixes: A list of some of the fixes made in the 2018.0.2 version such as bugs, errors, issues, etc. | | H2: How to download and install the full .rar file | - Requirements: What are the system requirements for running AutoCAD Civil 3D 2018.0.2<br>- Sources: Where can you find the full .rar file for download<br>- Steps: How to download and install the full .rar file step by step | | H1: Conclusion | - Summary: A brief summary of the main points of the article<br>- Recommendation: A recommendation to download and use AutoCAD Civil 3D 2018.0.2<br>- Call to action: A call to action to visit a website or contact a service for more information or assistance | | H1: FAQs | - Q1: What is the difference between AutoCAD and AutoCAD Civil 3D?<br>- A1: AutoCAD is a general-purpose CAD software that can be used for various design and drafting applications, while AutoCAD Civil 3D is a specialized software that focuses on civil engineering design and documentation.<br>- Q2: What are some of the applications of AutoCAD Civil 3D?<br>- A2: Some of the applications of AutoCAD Civil 3D are surveying, land development, transportation engineering, water resources engineering, environmental engineering, etc.<br>- Q3: How much does AutoCAD Civil 3D cost?<br>- A3: AutoCAD Civil 3D is available as a subscription-based service that costs $2,155 per year or $270 per month.<br>- Q4: How can I learn AutoCAD Civil 3D?<br>- A4: You can learn AutoCAD Civil 3D by taking online courses, watching tutorials, reading manuals, joining forums, or hiring a trainer.<br>- Q5: How can I get support for AutoCAD Civil 3D?<br>- A5: You can get support for AutoCAD Civil 3D by visiting the official website, contacting the customer service, accessing the knowledge base, or joining the community. | # Article with HTML formatting ```html <h1>Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar: What is it and why you need it</h1>
- <p>If you are a civil engineer or a civil engineering student, you probably have heard of <strong>AutoCAD Civil 3D</strong>, one of the most popular and powerful software for civil engineering design and documentation. But do you know what is <strong>Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar</strong> and why you need it? In this article, we will explain what this software is, what are its features and benefits, what are its improvements and fixes in the latest version, and how to download and install it.</p>
- <h2>What is AutoCAD Civil 3D and what are its features and benefits</h2>
- <p><strong>AutoCAD Civil 3D</strong> is a software developed by <a href="https://www.autodesk.com/">Autodesk</a>, a leading company in design and engineering software solutions. It is a software that allows you to create civil engineering designs and documentation using dynamic models, an object-oriented environment, and powerful tools for building information modeling (BIM).</p>
- <h2>Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar</h2><br /><p><b><b>Download Zip</b> –––––>>> <a href="https://byltly.com/2uKwsP">https://byltly.com/2uKwsP</a></b></p><br /><br />
- <p>Some of the main features of <strong>AutoCAD Civil 3D</strong> are:</p>
- <ul>
- <li><strong>Dynamic models:</strong> You can create dynamic models that update automatically as you make changes to your design parameters.</li>
- <li><strong>Object-oriented environment:</strong> You can work with objects that have properties and behaviors that reflect real-world elements such as surfaces, alignments, profiles, corridors, pipes, etc.</li>
- <li><strong>BIM tools:</strong> You can use BIM tools to analyze your design data, generate reports and presentations, collaborate with other stakeholders, and integrate with other software applications.</li>
- </ul>
- <p>Some of the benefits of using <strong>AutoCAD Civil 3D</strong> are:</p>
- <ul>
- <li><strong>Efficiency:</strong> You can save time and resources by using dynamic models that reduce errors and rework.</li>
- <li><strong>Accuracy:</strong> You can improve your design quality and accuracy by using object-oriented environment that reflects real-world conditions.</li>
- <li><strong>Collaboration:</strong> You can enhance your collaboration and communication with other stakeholders by using BIM tools that facilitate data sharing and coordination.</li>
- </ul>
- <h2>What is the 2018.0.2 version and what are its improvements and fixes</h2>
- <p>The <strong>2018.0.2 version</strong> of <strong>AutoCAD Civil 3D</strong> is the latest update released by Autodesk on November 6th, which includes several improvements and fixes that enhance the performance, stability, compatibility, etc.</p>
- <p>The following table summarizes some of the main improvements made in this version:</p>
- <table>
- <tr><th>Area</th><th>Description</th></tr>
- <tr><td>Civil View</td><td>The performance has been improved when importing large quantities of objects into Autodesk InfraWorks.</td></tr>
- <tr><td>Data Shortcuts</td><td>The performance has been improved when creating data shortcuts for corridors with large quantities of baselines.</td></tr>
- <tr><td>Drawing Management</td><td>The stability has been improved when opening drawings containing data shortcuts.</td></tr>
- <tr><td>Pipes</td><td>The stability has been improved when editing pipe networks in section views.</td></tr>
- <tr><td>Railings</td><td>The stability has been improved when editing railings in profile views.</td></tr>
- <tr><td>Roadway Design</td><td>The stability has been improved when editing corridors with large quantities of regions.</td></tr>
- <tr><td>User Interface</td><td>The compatibility has been improved with high resolution monitors.</td></tr>
- <tr><td>Xref</td><td>The performance has been improved when opening drawings containing xrefs.</td></tr>
- </table>
- <p>The following table summarizes some of the main fixes made in this version:</p>
- <table>
- <tr><th>Bug ID</th><th>Description</th></tr>
- <tr><td>CIVIL-12900</td><td>An issue where corridor solids were not created correctly for some corridors has been resolved.</td></tr>
- <tr><td>CIVIL-13076</td><td>An issue where corridor feature lines were not created correctly for some corridors has been resolved.</td></tr>
- <tr><td>CIVIL-13107</td><td>An issue where corridor solids were not displayed correctly in section views has been resolved.</td></tr>
- <tr><td>CIVIL-13108</td><td>An issue where corridor feature lines were not displayed correctly in section views has been resolved.</td></tr>
- <tr><td>CIVIL-13109</td><td>An issue where corridor solids were not displayed correctly in plan views has been resolved.</td></tr>
- <tr><td>CIVIL-13110</td><td>An issue where corridor feature lines were not displayed correctly in plan views has been resolved.</td></tr>
- <tr><td>CIVIL-13111</td><td>An issue where corridor solids were not displayed correctly in 3D views has been resolved.</td></tr>
- <tr><td>CIVIL-13112</td><td>An issue where corridor feature lines were not displayed correctly in 3D views has been resolved.</td></tr>
- <tr><td>CIVIL-13113</td><td>An issue where corridor solids were not exported correctly to Autodesk InfraWorks has been resolved.</td></tr>
- <tr><td>CIVIL-13114</td><td>An issue where corridor feature lines were not exported correctly to Autodesk InfraWorks has been resolved.</td></tr>
- <tr><td>CIVIL-13115</td><td>An issue where corridor solids were not exported correctly to Autodesk Navisworks has been resolved.</td></tr>
- <tr><td>CIVIL-13116</td><td>An issue where corridor feature lines were not exported correctly to Autodesk Navisworks has been resolved.</td></tr>
- <tr><td>CIVIL-13117</td><td>An issue where corridor solids were not exported correctly to Autodesk Revit has been resolved.</td></tr>
- <tr><td>CIVIL-13118</td><td>An issue where corridor feature lines were not exported correctly to Autodesk Revit has been resolved.</td></tr>
- </table>
- <h2>How to download and install the full .rar file</h2>
- <p>If you want to download and install the <strong>full .rar file</strong> of <strong>AutoCAD Civil 3D 2018.0.2 (x64)</strong>, you need to make sure that your system meets the following requirements:</p>
- <ul>
- <li>Operating System: Microsoft Windows 10 (64-bit only), 8.1 (64-bit only), or 7 SP1 (64-bit only)</li>
- <li>Processor: Minimum: 2.5–2.9 GHz or faster processor / Recommended: 3+ GHz or faster processor</li>
- <li>Memory: Minimum: 4 GB / Recommended: 16 GB</li>
- <li>Display Resolution: Minimum: 1360 x 768 (1920 x 1080 recommended) with True Color / Maximum: 4K (3840 x 2160)</li>
- <li>Display Card: Minimum: 1 GB GPU with 29 GB/s Bandwidth and DirectX 11 compliant / Recommended: 4 GB GPU with 106 GB/s Bandwidth and DirectX 11 compliant</li>
- <li>Disk Space: Installation: 10 GB</li>
- <li>Browser: Google Chrome (for AutoCAD web app)</li>
- <li>.NET Framework: .NET Framework Version 4.6</li>
- </ul>
- <p>Once you have checked your system requirements, you can find the <strong>full .rar file</strong> for download from various sources on the internet, such as <a href="https://www.4shared.com/rar/ppJ8u-FHfa/Autodesk_AutoCAD_Civil_3D_2018.html?locale=en">4shared</a>, <a href="https://solidtorrents.to/torrents/autodesk-autocad-civil-3d-2018-0-2-x64-full-www-te-70ecc/5be183046f7d1043a9e2183e/">SolidTorrents</a>, or <a href="https://archive.org/details/autodeskautocad2018.0.2finalx86x64keygensh">Archive.org</a>. However, be careful of the potential risks of downloading files from unverified or untrusted sources, such as viruses, malware, or corrupted files.</p>
- <p>Autodesk AutoCAD Civil 3D 2018 x64 full version download<br />
- How to install Autodesk AutoCAD Civil 3D 2018.0.2 on 64-bit Windows<br />
- Autodesk AutoCAD Civil 3D 2018.0.2 crack serial keygen<br />
- Autodesk AutoCAD Civil 3D 2018 for civil engineering design<br />
- Autodesk AutoCAD Civil 3D 2018.0.2 patch update<br />
- Autodesk AutoCAD Civil 3D 2018 x64 free trial<br />
- Autodesk AutoCAD Civil 3D 2018 system requirements<br />
- Autodesk AutoCAD Civil 3D 2018 tutorial pdf<br />
- Autodesk AutoCAD Civil 3D 2018 new features and enhancements<br />
- Autodesk AutoCAD Civil 3D 2018 license activation<br />
- Autodesk AutoCAD Civil 3D 2018 online training course<br />
- Autodesk AutoCAD Civil 3D 2018 vs Revit<br />
- Autodesk AutoCAD Civil 3D 2018 user guide<br />
- Autodesk AutoCAD Civil 3D 2018 product key generator<br />
- Autodesk AutoCAD Civil 3D 2018 software review<br />
- Autodesk AutoCAD Civil 3D 2018 tips and tricks<br />
- Autodesk AutoCAD Civil 3D 2018 support forum<br />
- Autodesk AutoCAD Civil 3D 2018 best practices<br />
- Autodesk AutoCAD Civil 3D 2018 comparison with other versions<br />
- Autodesk AutoCAD Civil 3D 2018 keyboard shortcuts<br />
- Autodesk AutoCAD Civil 3D 2018 price and discount<br />
- Autodesk AutoCAD Civil 3D 2018 torrent magnet link<br />
- Autodesk AutoCAD Civil 3D 2018 workflow and tools<br />
- Autodesk AutoCAD Civil 3D 2018 certification exam<br />
- Autodesk AutoCAD Civil 3D 2018 video tutorial<br />
- Autodesk AutoCAD Civil 3D 2018 sample projects and files<br />
- Autodesk AutoCAD Civil 3D 2018 error and troubleshooting<br />
- Autodesk AutoCAD Civil 3D 2018 customization and add-ons<br />
- Autodesk AutoCAD Civil 3D 2018 release date and changelog<br />
- Autodesk AutoCAD Civil 3D 2018 benefits and advantages<br />
- Autodesk AutoCAD Civil 3D 2018 alternatives and competitors<br />
- Autodesk AutoCAD Civil 3D 2018 feedback and testimonials<br />
- Autodesk AutoCAD Civil 3D 2018 subscription and renewal<br />
- Autodesk AutoCAD Civil 3D 2018 offline installer<br />
- Autodesk AutoCAD Civil 3D 2018 compatibility and interoperability<br />
- Autodesk AutoCAD Civil RAR file extraction and installation guide <br />
- How to use Autodesk AutoCAD Civil for land development and infrastructure design <br />
- How to upgrade from previous versions of Autodesk AutoCAD Civil <br />
- How to uninstall or remove Autodesk AutoCAD Civil from your computer <br />
- How to optimize the performance of Autodesk AutoCAD Civil <br />
- How to import and export data in Autodesk AutoCAD Civil <br />
- How to create and edit surfaces, alignments, profiles, corridors, and pipe networks in Autodesk Auto CAD Civil <br />
- How to use dynamic modeling and analysis tools in Autodesk Auto CAD Civil <br />
- How to collaborate and share data with other users in Autodesk Auto CAD Civil <br />
- How to generate reports and documentation in Autodesk Auto CAD Civil <br />
- How to apply standards and styles in Autodesk Auto CAD Civil <br />
- How to use geospatial data and coordinate systems in Autodesk Auto CAD Civil <br />
- How to create and manage point clouds in Autodesk Auto CAD Civil <br />
- How to use visualization and rendering tools in Autodesk Auto CAD</p>
- <p>To download and install the <strong>full .rar file</strong>, follow these steps:</p>
- <ol>
- <li>Download the <strong>full .rar file</strong> from your preferred source and save it to your computer.</li>
- <li>Extract the <strong>.rar file</strong> using a software such as WinRAR or 7-Zip.</li>
- <li>Run the <strong>setup.exe file</strong> as administrator and follow the instructions on the screen.</li>
- <li>Enter your <strong>serial number and product key</strong> when prompted. You can find them on your Autodesk Account or on the packaging of your product.</li>
- <li>Select your <strong>installation options</strong>, such as language, components, and location.</li>
- <li>Click <strong>Install</strong> and wait for the installation to complete.</li>
- <li>Restart your computer if required.</li>
- <li>Launch <strong>AutoCAD Civil 3D 2018.0.2 (x64)</strong> and enjoy!</li>
- </ol>
- <h1>Conclusion</h1>
- <p>In this article, we have explained what is <strong>Autodesk AutoCAD Civil 3D 2018.0.2 (x64) FULL .rar</strong>, what are its features and benefits, what are its improvements and fixes in the latest version, and how to download and install it. We hope you have found this article useful and informative.</p>
- <p>If you are looking for a software that can help you create civil engineering designs and documentation using dynamic models, an object-oriented environment, and powerful BIM tools, we recommend you to download and use <strong>AutoCAD Civil 3D 2018.0.2 (x64)</strong>. It is a comprehensive solution that can improve your efficiency, accuracy, and collaboration in your civil engineering projects.</p>
- <p>If you want to learn more about <strong>AutoCAD Civil 3D 2018.0.2 (x64)</strong>, you can visit the official website, contact the customer service, access the knowledge base, or join the community. You can also find more resources such as tutorials, manuals, forums, or trainers online.</p>
- <h1>FAQs</h1>
- <h4>Q1: What is the difference between AutoCAD and AutoCAD Civil 3D?</h4>
- <p>A1: AutoCAD is a general-purpose CAD software that can be used for various design and drafting applications, while AutoCAD Civil 3D is a specialized software that focuses on civil engineering design and documentation.</p>
- <h4>Q2: What are some of the applications of AutoCAD Civil 3D?</h4>
- <p>A2: Some of the applications of AutoCAD Civil 3D are surveying, land development, transportation engineering, water resources engineering, environmental engineering,, etc.</p>
- <h4>Q3: How much does AutoCAD Civil 3D cost?</h4>
- <p>A3: AutoCAD Civil 3D is available as a subscription-based service that costs $2,155 per year or $270 per month.</p>
- <h4>Q4: How can I learn AutoCAD Civil 3D?</h4>
- <p>A4: You can learn AutoCAD Civil 3D by taking online courses,<
- watching tutorials,<
- reading manuals,<
- [24][25][26][27][28][29][30][31][32][33][34][35][36][37], joining forums,[38][39][40][41][42] or hiring a trainer.[43][44][45][46][47][48][49][50]<p>
- <h4>Q5: How can I get support for AutoCAD Civil 3D?</h4>
- <p>A5: You can get support for AutoCAD Civil 3D by visiting the official website,[51] contacting the customer service,[52] accessing the knowledge base,[53] or joining the community.[54]<p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cutmaster2dprov1331fullcrackserialkeygenfree The Benefits and Features of This Powerful Software.md
DELETED
@@ -1,21 +0,0 @@
-
- <h1>What is CutMaster 2D Pro v1.3.3.1?</h1>
- <p>If you are looking for a professional and powerful software program for cutting and slicing up images and videos, you might want to check out <strong>CutMaster 2D Pro v1.3.3.1</strong>. This software is a highly responsive application that allows users to slice images like a professional.</p>
- <p>CutMaster 2D Pro v1.3.3.1 is a very versatile program that can handle any type of image or video format, such as JPG, PNG, BMP, GIF, MP4, AVI, MOV, etc.</p>
- <h2>cutmaster2dprov1331fullcrackserialkeygenfree</h2><br /><p><b><b>Download</b> ★★★ <a href="https://byltly.com/2uKxqd">https://byltly.com/2uKxqd</a></b></p><br /><br />
- <p>With CutMaster 2D Pro v1.3.3.1, you can quickly and easily create professional style cuts for your projects, such as banners, logos, posters, flyers, brochures, etc.</p>
- <p>You can also use it to edit your personal photos and videos, such as cropping, rotating, resizing, adding effects, etc.</p>
- <p>CutMaster 2D Pro v1.3.3.1 has a user-friendly interface that makes it easy to navigate and operate.</p>
- <p>It also has a lot of features and tools that make it stand out from other similar programs.</p>
- <h2>Why do you need CutMaster 2D Pro v1.3.3.1?</h2>
- <p>There are many reasons why you might need CutMaster 2D Pro v1.3.3.1 for your image and video cutting needs.</p>
- <p>Some of them are:</p>
- <ul>
- <li><strong>It saves you time and money</strong>. You don't have to spend hours or days trying to cut your images and videos manually or using other complicated programs that require a lot of skills and resources.</li>
- <li><strong>It improves your quality and creativity</strong>. You can achieve high-quality results with minimal effort and maximum accuracy using CutMaster 2D Pro v1.3.3.1.</li>
- <li><strong>It enhances your productivity and efficiency</strong>. You can work faster and smarter with CutMaster 2D Pro v1.3.3.1 by using its advanced features and tools that automate and simplify your cutting process.</li>
- <li><strong>It gives you more flexibility and control</strong>. You can customize your cuts according to your preferences and needs using CutMaster 2D Pro v1.3.3.</li>
- </ul>
- <h3>How to download CutMaster 2D Pro v1.</h4></p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Altiumfiletypenotrecognized.md
DELETED
@@ -1,54 +0,0 @@
- <h2>altiumfiletypenotrecognized</h2><br /><p><b><b>DOWNLOAD</b> ✏ ✏ ✏ <a href="https://imgfil.com/2uxZLY">https://imgfil.com/2uxZLY</a></b></p><br /><br />
-
- . . but when I want to save a change in the second project it says, ¨are not allowed to save´
-
- I think the problem is not in the second project because the second project has a lot of.J3 files to simulate and verify the.sch file. So I ask you if I can read the.sch file in the first project and save it in the second project or if I have to read the.sch file in the second project . . ...
-
- thank you very much in advance . . . . . . .
-
- A:
-
- Sorry, I'm a little late with this answer, but in the mean time, I have just had a similar issue.
-
- In order to "share" a project between multiple Altium instances, you need to make sure that:
-
- The.sch project file is saved in the "Save As..." dialog (not the "Copy" dialog).
-
- The new.sch project file is saved in the same folder as the.sch file of the original project.
-
- I'm sure this has been covered elsewhere on the internet, but here are the links I found through a quick google search:
-
- Q:
-
- Map not applied to ArrayList inside a class
-
- I have an application that reads about 1000 lines from a file and uses the information to make a list of customers. I am trying to print the last name of the customer to console but when I try to use my map I get an error.
-
- My Customer class:
-
- public class Customer {
-
- private String lastName;
-
- private String firstName;
-
- private String address;
-
- public Customer(String firstName, String lastName, String address)
-
- this.firstName = firstName;
-
- this.lastName = lastName;
-
- this.address = address;
-
-
-
- public String getLastName() {
-
- return this.lastName;
-
- public 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Autoclosets 80 Con Serial.md
DELETED
@@ -1,6 +0,0 @@
- <h2>Autoclosets 80 Con Serial</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://imgfil.com/2uxYR1">https://imgfil.com/2uxYR1</a></b></p><br /><br />
-
- <a href=http://retrooms.com/bin/go.php?sid=2><b>暖剪釣潗</b>! ... Online discounts to 80%! ... microcad software s l autoclosets dise o de armarios ... serial killers http://legalmusicworld.in/pop/pop-bubble-game number one ... 1fdad05405<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/BBE.Sonic.Sweet.Bundle.VST.RTAS.v1.0-AiR R33p Setup Free.md
DELETED
@@ -1,6 +0,0 @@
- <h2>BBE.Sonic.Sweet.Bundle.VST.RTAS.v1.0-AiR r33p setup free</h2><br /><p><b><b>DOWNLOAD</b> ✓✓✓ <a href="https://imgfil.com/2uxZth">https://imgfil.com/2uxZth</a></b></p><br /><br />
-
- Version: 4.26- File size: 4.22MB- Date added: March 19, 2016- Price: Free- Operating system: ... Bysoft Internet Remote Control 2 6 4 957, 0.00 KB, 570, 0. ... Encoder v1.1.0.44 )2011(,Townopolis,MakeMusic Finale CR13 2012 Setup + ... Pro 6.5.08,BBE Sonic Sweet Bundle VST RTAS (v1.0)- AiR r33p,Ua ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Brawlhalla Como Conseguir MonedasMammoth Glory Coins UPDATED.md
DELETED
@@ -1,9 +0,0 @@
- <h2>brawlhalla como conseguir monedasMammoth Glory Coins</h2><br /><p><b><b>DOWNLOAD</b> ☑ <a href="https://imgfil.com/2uxYxx">https://imgfil.com/2uxYxx</a></b></p><br /><br />
- <br />
- 18 Jul 2020 - Race Man 3 Full Movie In Hindi Hd 720p Download Free - brawlhalla como conseguir monedasMammoth Glory Coins. Download Race Man 3 In Hindi Hd 720p Pc...
- Apr 19, 2019 - Download Race Man 3 Full Movie In Hindi Hd 720p...
- Download Race Man 3 In Hindi Hd 720p Pc free race man 3 in english download race man 3 in english watch race man 3 in english full movie download race man 3 in english download movie race man 3 in english watch race man 3 in english full movie download race man 3 in english
- in english download 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Durgasaptashatibeejmantrasadhanapdf35 [UPD].md
DELETED
@@ -1,6 +0,0 @@
- <h2>durgasaptashatibeejmantrasadhanapdf35</h2><br /><p><b><b>Download</b> === <a href="https://imgfil.com/2uy1y0">https://imgfil.com/2uy1y0</a></b></p><br /><br />
- <br />
- An Introduction To Bunraku [HOT] · Free E Book Download __HOT__ In Pdf Lang Lang Piano Book · Durgasaptashatibeejmantrasadhanapdf35. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/El Kulubud Daria Mecmuatul-Ahzabn Dzeltme ve Snflandrmasyla Oluturulan Du Kitab PDF ndir.md
DELETED
@@ -1,6 +0,0 @@
- <h2>elkulubuddariaindirpdfdownload</h2><br /><p><b><b>DOWNLOAD</b> ○○○ <a href="https://imgfil.com/2uxZrg">https://imgfil.com/2uxZrg</a></b></p><br /><br />
-
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK for Android 6.0 The Best Way to Enjoy Retro Games.md
DELETED
@@ -1,228 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Introduction</h1>
|
3 |
-
<p>If you are a fan of Nintendo GameCube and Wii games, you might have wished to play them on your Android device. Well, thanks to <a href="(^5^)">Dolphin Emulator</a>, you can do just that! Dolphin Emulator is a free and open-source software that allows you to run GameCube and Wii games on your Android device in full HD (1080p) with several enhancements, such as compatibility with all PC controllers, turbo speed, networked multiplayer, and even more.</p>
|
4 |
-
<h2>dolphin emulator android 6.0 apk</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://urlin.us/2uSSy6">https://urlin.us/2uSSy6</a></b></p><br /><br />
|
5 |
-
<p>Dolphin Emulator has been around since 2003 as a desktop application for Windows, Linux, and macOS. It was the first GameCube emulator that could successfully run commercial games. Later on, it also gained support for Wii emulation. In 2013, Dolphin Emulator was ported to Android as a beta version, and since then it has been updated regularly with new features and bug fixes.</p>
|
6 |
-
<p>However, Dolphin Emulator is not a perfect emulator. It has some requirements and challenges that you need to be aware of before using it on your Android device. For example, you need a powerful device that can handle the emulation workload, you need to obtain the GameCube and Wii games legally from your own discs or backups, you need to install the app manually from an external source, you need to configure the settings and preferences according to your device and game compatibility, and you need to troubleshoot some errors and issues that may arise during the emulation process.</p>
|
7 |
-
<p>In this article, I will provide you with a comprehensive guide on how to download, install, and use Dolphin Emulator Android 6.0 APK on your Android device. I will also answer some frequently asked questions and share some user reviews about this emulator.</p>
|
8 |
-
<h1>Downloading Dolphin Emulator Android 6.0 APK</h1>
|
9 |
-
<p>The first step to use Dolphin Emulator on your Android device is to download the APK file from a reliable source. The APK file is an executable file that contains the app's code and resources. You can download Dolphin Emulator Android 6.0 APK from either the official website or other sources.</p>
|
10 |
-
<h2>How to download Dolphin Emulator Android 6.0 APK from the official website?</h2>
|
11 |
-
<p>The official website of Dolphin Emulator is <a href="">https://dolphin-emu.org</a>. Here you can find the latest news, updates, downloads, and documentation about the emulator. You can also join the community forums, chat rooms, and social media pages to interact with other users and developers.</p>
|
12 |
-
<p>To download Dolphin Emulator Android 6.0 APK from the official website, follow these steps:</p>
|
13 |
-
<ol>
|
14 |
-
<li>Go to <a href="">https://dolphin-emu.org</a> on your web browser.</li>
|
15 |
-
<li>Click on the <strong>Download</strong> button on the top right corner of the homepage.</li>
|
16 |
-
<li>Select <strong>Android</strong> from the drop-down menu.</li>
|
17 |
-
<li>You will be redirected to a page with a list of available versions of Dolphin Emulator for Android. The latest version is usually at the top of the list.</li>
|
18 |
-
<li>Click on the <strong>Download APK</strong> button next to the version you want to download. You can also check the release notes, changelog, and compatibility list for each version by clicking on the respective links.</li>
|
19 |
-
<li>A pop-up window will appear asking you to confirm your download. Click on <strong>OK</strong> to proceed.</li>
|
20 |
-
<li>The APK file will be downloaded to your device's default download folder. You can check the progress and status of your download on your notification bar or download manager app.</li>
|
21 |
-
</ol>
|
22 |
-
<h2>How to download Dolphin Emulator Android 6.0 APK from other sources?</h2>
|
23 |
-
<p>If you cannot access the official website of Dolphin Emulator for some reason, or if you want to download an older or modified version of Dolphin Emulator Android 6.0 APK, you can also find it on other sources, such as third-party websites, app stores, file hosting services, or torrent sites. However, you need to be careful when downloading from these sources, as they may not be trustworthy or safe. Some of them may contain malware, viruses, spyware, adware, or other unwanted programs that can harm your device or compromise your privacy. Some of them may also provide fake or corrupted files that may not work properly or cause errors and issues with your emulator.</p>
|
24 |
-
<p>dolphin emulator android 6.0 apk download<br />
|
25 |
-
dolphin emulator android 6.0 apk free<br />
|
26 |
-
dolphin emulator android 6.0 apk latest version<br />
|
27 |
-
dolphin emulator android 6.0 apk mod<br />
|
28 |
-
dolphin emulator android 6.0 apk no root<br />
|
29 |
-
dolphin emulator android 6.0 apk offline<br />
|
30 |
-
dolphin emulator android 6.0 apk old version<br />
|
31 |
-
dolphin emulator android 6.0 apk pro<br />
|
32 |
-
dolphin emulator android 6.0 apk reddit<br />
|
33 |
-
dolphin emulator android 6.0 apk update<br />
|
34 |
-
dolphin emulator android 6.0 apk xda<br />
|
35 |
-
dolphin emulator android 6.0 apk youtube<br />
|
36 |
-
dolphin emulator android 6.0 games apk<br />
|
37 |
-
dolphin emulator android 6.0 marshmallow apk<br />
|
38 |
-
dolphin emulator android 6.0 nougat apk<br />
|
39 |
-
dolphin emulator android 6.0 oreo apk<br />
|
40 |
-
dolphin emulator android 6.0 pie apk<br />
|
41 |
-
dolphin emulator android 6.0 q apk<br />
|
42 |
-
dolphin emulator android 6.0 r apk<br />
|
43 |
-
dolphin emulator android 6.0 s apk<br />
|
44 |
-
best settings for dolphin emulator android 6.0 apk<br />
|
45 |
-
how to install dolphin emulator android 6.0 apk<br />
|
46 |
-
how to use dolphin emulator android 6.0 apk<br />
|
47 |
-
is dolphin emulator android 6.0 apk safe<br />
|
48 |
-
what is dolphin emulator android 6.0 apk<br />
|
49 |
-
where to download dolphin emulator android 6.0 apk<br />
|
50 |
-
why dolphin emulator android 6.0 apk not working<br />
|
51 |
-
wii games for dolphin emulator android 6.0 apk<br />
|
52 |
-
gamecube games for dolphin emulator android 6.0 apk<br />
|
53 |
-
nintendo games for dolphin emulator android 6.0 apk<br />
|
54 |
-
mario games for dolphin emulator android 6.0 apk<br />
|
55 |
-
zelda games for dolphin emulator android 6.0 apk<br />
|
56 |
-
pokemon games for dolphin emulator android 6.0 apk<br />
|
57 |
-
resident evil games for dolphin emulator android 6.0 apk<br />
|
58 |
-
sonic games for dolphin emulator android 6.0 apk<br />
|
59 |
-
kirby games for dolphin emulator android 6.0 apk<br />
|
60 |
-
metroid games for dolphin emulator android 6.0 apk<br />
|
61 |
-
fire emblem games for dolphin emulator android 6.0 apk<br />
|
62 |
-
animal crossing games for dolphin emulator android 6.0 apk<br />
|
63 |
-
super smash bros games for dolphin emulator android 6.0 apk<br />
|
64 |
-
mario kart games for dolphin emulator android 6.0 apk<br />
|
65 |
-
mario party games for dolphin emulator android 6.0 apk<br />
|
66 |
-
mario sports games for dolphin emulator android 6.0 apk<br />
|
67 |
-
mario golf games for dolphin emulator android 6.0 apk<br />
|
68 |
-
mario tennis games for dolphin emulator android 6.0 apk<br />
|
69 |
-
mario baseball games for dolphin emulator android 6.0 apk<br />
|
70 |
-
mario soccer games for dolphin emulator android 6.0 apk<br />
|
71 |
-
mario basketball games for dolphin emulator android 6.0 apk</p>
|
72 |
-
<p>To download Dolphin Emulator Android 6.0 APK from other sources, follow these steps:</p>
|
73 |
-
<ol>
|
74 |
-
<li>Search for "Dolphin Emulator Android 6.0 APK" on your preferred search engine or app store. You can also use keywords such as "download", "free", "latest", "modded", "cracked", "unlocked", etc. to narrow down your search results.</li>
|
75 |
-
<li>Browse through the results and select a source that looks reliable and reputable. You can check the ratings, reviews, comments, feedback, and reputation of the source before downloading from it. You can also use tools such as VirusTotal, Malwarebytes, or Norton to scan the URL or file for any potential threats.</li>
|
76 |
-
<li>Click on the <strong>Download</strong> button or link on the source's page. You may have to go through some ads, pop-ups, surveys, or captcha verification before you can access the download link. Be careful not to click on any suspicious or misleading links or buttons that may redirect you to unwanted sites or install unwanted programs on your device.</li>
|
77 |
-
<li>The APK file will be downloaded to your device's default download folder. You can check the progress and status of your download on your notification bar or download manager app.</li>
|
78 |
-
</ol>
|
79 |
-
<h2>How to verify the integrity and safety of the downloaded file?</h2>
|
80 |
-
<p>After downloading Dolphin Emulator Android 6.0 APK from any source, you should always verify the integrity and safety of the downloaded file before installing it on your device. This is to ensure that the file is authentic, complete, and free from any malicious code or modification that may affect its performance or functionality.</p>
|
81 |
-
<p>To verify the integrity and safety of the downloaded file, follow these steps:</p>
|
82 |
-
<ol>
|
83 |
-
<li>Check the file size and name of the downloaded file. Compare it with the original file size and name from the official website or source. If there is a significant difference in size or name, it may indicate that the file is fake or corrupted.</li>
|
84 |
-
<li>Check the file extension of the downloaded file. It should be ".apk" which stands for Android Package Kit. If it is anything else, such as ".zip", ".rar", ".exe", ".bin", etc., it may indicate that the file is not an APK file or that it contains other files that may be harmful or unnecessary.</li>
|
85 |
-
<li>Check the file signature or checksum of the downloaded file. This is a unique code that identifies and verifies the authenticity and integrity of a file. You can use tools such as <a href="">MD5 & SHA Checksum Utility</a>, <a href="">HashTab</a>, or <a href="">Checksum Calculator</a> to generate and compare the file signature or checksum of the downloaded file with the original one from the official website or source. If they match, it means that the file is authentic and intact. If they don't match, it means that the file is fake or corrupted.</li>
|
86 |
-
<li>Scan the file with a reputable antivirus or anti-malware program, such as <a href="">Avast</a>, <a href="">Malwarebytes</a>, or <a href="(^-1^)">Norton</a>. These programs can detect and remove any malicious code or modification that may be hidden in the file. They can also protect your device from any potential threats that may arise from installing or running the file.</li>
|
87 |
-
</ol>
|
88 |
-
<p>If the downloaded file passes all these checks, you can proceed to install it on your device. If not, you should delete it immediately and download it again from a different source.</p>
|
89 |
-
<h1>Installing Dolphin Emulator Android 6.0 APK</h1>
|
90 |
-
<p>The next step to use Dolphin Emulator on your Android device is to install the APK file on your device. However, since Dolphin Emulator is not available on the Google Play Store, you need to install it manually from an external source. This means that you need to grant permissions and overcome security restrictions that may prevent you from installing apps from unknown sources.</p>
|
91 |
-
<h2>How to install Dolphin Emulator Android 6.0 APK on your Android device?</h2>
|
92 |
-
<p>To install Dolphin Emulator Android 6.0 APK on your Android device, follow these steps:</p>
|
93 |
-
<ol>
|
94 |
-
<li>Locate the downloaded APK file on your device's file manager app or download manager app. You can also use a third-party file manager app, such as <a href="(^-2^)">ES File Explorer</a>, <a href="(^-3^)">File Manager</a>, or <a href="(^-4^)">Solid Explorer</a> to locate the file.</li>
|
95 |
-
<li>Tap on the APK file to open it. A pop-up window will appear asking you to confirm your installation. Tap on <strong>Install</strong> to proceed.</li>
|
96 |
-
<li>If you see a message saying "For your security, your phone is not allowed to install unknown apps from this source", tap on <strong>Settings</strong>. This will take you to a screen where you can enable the option to allow installing apps from unknown sources. Depending on your device model and Android version, this option may be called "Unknown sources", "Install unknown apps", "Allow app installs", or something similar. Toggle the switch or check the box next to this option to enable it.</li>
|
97 |
-
<li>Go back to the installation screen and tap on <strong>Install</strong> again. The installation process will begin and may take a few seconds or minutes depending on your device's speed and performance.</li>
|
98 |
-
<li>Once the installation is complete, you will see a message saying "App installed". Tap on <strong>Open</strong> to launch Dolphin Emulator on your device. You can also tap on <strong>Done</strong> to close the installation screen and find Dolphin Emulator on your app drawer or home screen.</li>
|
99 |
-
</ol>
|
100 |
-
<h2>How to grant permissions and overcome security restrictions?</h2>
|
101 |
-
<p>Dolphin Emulator requires some permissions and access to certain features and functions of your device in order to work properly. For example, it needs access to your storage, camera, microphone, location, network, etc. You need to grant these permissions and overcome any security restrictions that may prevent Dolphin Emulator from accessing these features and functions.</p>
|
102 |
-
<p>To grant permissions and overcome security restrictions, follow these steps:</p>
|
103 |
-
<ol>
|
104 |
-
<li>The first time you launch Dolphin Emulator on your device, you will see a series of pop-up windows asking you to grant various permissions to the app. Tap on <strong>Allow</strong> or <strong>Accept</strong> for each permission request. You can also tap on <strong>Deny</strong> or <strong>Reject</strong> if you don't want to grant a certain permission, but this may affect the performance or functionality of the app.</li>
|
105 |
-
<li>If you want to change or manage the permissions for Dolphin Emulator later, go to your device's settings app and look for the option called "Apps", "Applications", "App Manager", or something similar. Tap on this option and find Dolphin Emulator from the list of installed apps. Tap on Dolphin Emulator and then tap on <strong>Permissions</strong>. Here you can see all the permissions that Dolphin Emulator has requested and whether they are granted or denied. You can toggle the switch or check the box next to each permission to grant or revoke it.</li>
|
106 |
-
<li>Some features and functions of Dolphin Emulator may be blocked or restricted by your device's security settings, such as battery optimization, data usage, background activity, overlay, etc. These settings may prevent Dolphin Emulator from running smoothly or at all. To overcome these security restrictions, go to your device's settings app and look for the option called "Security", "Privacy", "Battery", "Data", or something similar. Tap on this option and find Dolphin Emulator from the list of apps or features. Tap on Dolphin Emulator and then tap on the option that allows you to disable or bypass the security restriction. For example, you may need to disable battery optimization, allow unrestricted data usage, enable background activity, allow overlay, etc.</li>
|
107 |
-
</ol>
|
108 |
-
<p>By granting permissions and overcoming security restrictions, you can ensure that Dolphin Emulator can access all the features and functions it needs to run GameCube and Wii games on your Android device.</p>
|
109 |
-
<h1>Using Dolphin Emulator Android 6.0 APK</h1>
|
110 |
-
<p>After installing Dolphin Emulator Android 6.0 APK on your Android device, you can start using it to play GameCube and Wii games on your device. However, before you can do that, you need to obtain and load the games on the emulator. You also need to customize the graphics and audio settings of the emulator according to your device and game compatibility. You also need to connect and use controllers with the emulator if you prefer to play with physical buttons and joysticks. You also need to play online multiplayer games with the emulator if you want to enjoy the social aspect of gaming.</p>
|
111 |
-
<h2>How to obtain and load GameCube and Wii games on Dolphin Emulator Android 6.0 APK?</h2>
|
112 |
-
<p>Dolphin Emulator does not come with any GameCube or Wii games pre-installed or included in the app. You need to obtain the games legally from your own discs or backups and load them on the emulator. The games are usually in the form of ISO or WBFS files that contain the game data and can be read by the emulator.</p>
|
113 |
-
<p>To obtain and load GameCube and Wii games on Dolphin Emulator Android 6.0 APK, follow these steps:</p>
|
114 |
-
<ol>
|
115 |
-
<li>If you have the original GameCube or Wii discs, you can use a disc drive and a software tool, such as <a href="(^-5^)">CleanRip</a>, <a href="(^-6^)">RawDump</a>, or <a href="(^-7^)">FriiDump</a> to rip the discs and create ISO or WBFS files on your computer. You can also use a modded Wii console and a software tool, such as <a href="(^-8^)">USB Loader GX</a>, <a href="(^-9^)">WiiFlow</a>, or <a href="(^-10^)">CFG USB Loader</a> to rip the discs and create ISO or WBFS files on a USB drive.</li>
|
116 |
-
<li>If you have backup copies of GameCube or Wii games, you can use a software tool, such as <a href="(^-11^)">Wii Backup Manager</a>, <a href="(^-12^)">Witgui</a>, or <a href="(^-13^)">Wii Backup Fusion</a> to convert them into ISO or WBFS files on your computer.</li>
|
117 |
-
<li>Once you have the ISO or WBFS files of the games you want to play, you need to transfer them to your Android device's storage. You can use a USB cable, a microSD card, a cloud service, or a wireless method to do so.</li>
|
118 |
-
<li>On your Android device, launch Dolphin Emulator and tap on the <strong>Add Folder</strong> button on the top right corner of the screen. This will allow you to browse your device's storage and select the folder where you stored your ISO or WBFS files.</li>
|
119 |
-
<li>Dolphin Emulator will scan the folder and display all the games that it can recognize in a grid view. You can tap on any game to see more details about it, such as title, region, size, rating, etc.</li>
|
120 |
-
<li>To load a game, simply tap on its icon and wait for Dolphin Emulator to launch it. You will see a loading screen with some information about the game and the emulator's status.</li>
|
121 |
-
<li>Once the game is loaded, you can start playing it on your Android device using either touch controls or physical controllers.</li>
|
122 |
-
</ol>
|
123 |
-
<h2>How to customize the graphics and audio settings of Dolphin Emulator Android 6.0 APK?</h2>
|
124 |
-
<p>Dolphin Emulator allows you to customize the graphics and audio settings of each game according to your device's capabilities and preferences. You can adjust the resolution, aspect ratio, anti-aliasing, anisotropic filtering, texture scaling, frame rate, sound volume, and other options to enhance or optimize your gaming experience. However, you should also be aware that some of these settings may affect the performance or compatibility of the emulator or the game. You may need to experiment with different settings to find the best balance between quality and speed.</p>
|
125 |
-
<p>To customize the graphics and audio settings of Dolphin Emulator Android 6.0 APK, follow these steps:</p>
|
126 |
-
<ol>
|
127 |
-
<li>On your Android device, launch Dolphin Emulator and tap on the <strong>Menu</strong> button on the top left corner of the screen. This will open a sidebar with various options.</li>
|
128 |
-
<li>Tap on <strong>Settings</strong> to access the emulator's settings menu.</li>
|
129 |
-
<li>Tap on <strong>Graphics</strong> to access the graphics settings menu. Here you can see four tabs: <strong>General</strong>, <strong>Enhancements</strong>, <strong>Hacks</strong>, and <strong>Advanced</strong>. Each tab contains different options that you can tweak according to your needs and preferences.</li>
|
130 |
-
<li>The <strong>General</strong> tab allows you to change the basic graphics settings, such as video backend, aspect ratio, resolution, vsync, etc.</li>
|
131 |
-
<li>The <strong>Enhancements</strong> tab allows you to change the advanced graphics settings, such as anti-aliasing, anisotropic filtering, texture scaling, post-processing effects, etc.</li>
|
132 |
-
<li>The <strong>Hacks</strong> tab allows you to change the performance-related graphics settings, such as skip EFB access, ignore format changes, store EFB copies to texture only, etc.</li>
|
133 |
-
<li>The <strong>Advanced</strong> tab allows you to change the experimental graphics settings, such as shader compilation mode, asynchronous shader compilation, etc.</li>
|
134 |
-
<li>To change any of these settings, simply tap on the option and select the value or toggle the switch that suits your needs and preferences. You can also tap on the <strong>i</strong> icon next to each option to see a brief explanation of what it does and how it affects the emulation.</li>
|
135 |
-
<li>If you want to reset all the graphics settings to their default values, tap on the <strong>Reset All Settings</strong> button at the bottom of the screen.</li>
|
136 |
-
<li>To save your changes and exit the graphics settings menu, tap on the <strong>Back</strong> button on your device or emulator.</li>
|
137 |
-
<li>To access the audio settings menu, tap on <strong>Audio</strong> from the settings menu. Here you can see two options: <strong>Enable Sound Output</strong> and <strong>Volume</strong>.</li>
|
138 |
-
<li>To enable or disable sound output from the emulator, toggle the switch next to <strong>Enable Sound Output</strong>. If you disable sound output, you will not hear any sound from the emulator or the game.</li>
|
139 |
-
<li>To adjust the volume of the sound output from the emulator, drag the slider next to <strong>Volume</strong>. You can also use your device's volume buttons to adjust the volume.</li>
|
140 |
-
<li>To save your changes and exit the audio settings menu, tap on the <strong>Back</strong> button on your device or emulator.</li>
|
141 |
-
</ol>
|
142 |
-
<h1>Troubleshooting Dolphin Emulator Android 6.0 APK</h1>
|
143 |
-
<p>Dolphin Emulator is a complex software that may encounter some errors and issues during its operation. Some of these errors and issues may be caused by factors such as device specifications, game compatibility, app configuration, network connection, etc. Some of them may be easy to fix or resolve by following some simple steps or tips. Some of them may require more advanced or technical solutions or assistance from the developers or support team.</p>
|
144 |
-
<h2>How to fix common errors and issues with Dolphin Emulator Android 6.0 APK?</h2>
|
145 |
-
<p>To fix common errors and issues with Dolphin Emulator Android 6.0 APK, follow these steps:</p>
|
146 |
-
<ol>
|
147 |
-
<li>If you experience crashes, freezes, slowdowns, glitches, or other performance problems with Dolphin Emulator or a game, try these tips:</li>
<ul>
<li>Close any other apps or processes that may be running in the background and consuming your device's resources.</li>
<li>Clean your device's cache and memory using a cleaning app or tool.</li>
<li>Restart your device and launch Dolphin Emulator again.</li>
<li>Lower or disable some of the graphics and audio settings that may be taxing your device's capabilities.</li>
<li>Check if your device meets the minimum system requirements for Dolphin Emulator and the game you are trying to play.</li>
<li>Update Dolphin Emulator to the latest version available.</li>
<li>Update your device's software and drivers to the latest version available.</li>
<li>Check if the game you are trying to play is compatible with Dolphin Emulator and your device. You can use the compatibility list on the official website or the game wiki to see the compatibility rating, issues, and solutions for each game.</li>
<li>Try a different version or build of Dolphin Emulator or the game you are trying to play. You can find older or newer builds of Dolphin Emulator on the download page or the development versions page. You can find different versions or regions of GameCube and Wii games on various websites or sources.</li>
<li>Try a different game file format or compression method. Dolphin Emulator supports ISO and WBFS file formats, as well as compressed formats such as GCZ, CISO, and RVZ. Some formats or compression methods may work better or worse than others depending on the game and your device.</li>
<li>Report the error or issue to the developers or support team of Dolphin Emulator. You can use the issue tracker on GitHub, the forums, the Discord server, or the contact form on the official website. Provide as much information as possible, such as your device model and specifications, Dolphin Emulator version and settings, game name and version, error message and screenshot, and steps to reproduce the problem.</li>
</ul>
|
161 |
-
<li>If you experience problems with downloading, installing, updating, or uninstalling Dolphin Emulator Android 6.0 APK, try these tips:</li>
<ul>
<li>Check your device's storage space and make sure you have enough free space to download, install, update, or uninstall Dolphin Emulator Android 6.0 APK.</li>
<li>Check your device's network connection and make sure you have a stable and fast internet connection to download, install, update, or uninstall Dolphin Emulator Android 6.0 APK.</li>
<li>Check your device's security settings and make sure you have enabled the option to allow installing apps from unknown sources.</li>
<li>Check the integrity and safety of the APK file you downloaded and make sure it is authentic, complete, and free from any malicious code or modification.</li>
<li>Use a reliable and reputable source to download Dolphin Emulator Android 6.0 APK. Avoid sources that may provide fake or corrupted files that may not work properly or cause errors and issues with your emulator.</li>
<li>Use a file manager app or tool to locate and manage the APK file on your device's storage. Avoid renaming, moving, deleting, or modifying the APK file in any way that may affect its installation or operation.</li>
<li>If you want to update Dolphin Emulator Android 6.0 APK, you can either download and install the latest version from the official website or source, or use the built-in updater feature in the app's settings menu. Do not use both methods at the same time as this may cause conflicts or errors.</li>
<li>If you want to uninstall Dolphin Emulator Android 6.0 APK, you can either use your device's settings app or a third-party uninstaller app or tool to remove it from your device. Make sure you also delete any leftover files or folders related to Dolphin Emulator Android 6.0 APK from your device's storage.</li>
</ul>
</ol>
|
173 |
-
<h1>Conclusion</h1>
<p>Dolphin Emulator Android 6.0 APK is a great way to play Nintendo GameCube and Wii games on your Android device. It has many features and benefits that make it one of the best emulators available for Android. However, it also has some requirements and challenges that you need to be aware of before using it on your device. You need to download, install, and use it properly according to your device's specifications and preferences. You also need to troubleshoot some errors and issues that may arise during its operation.</p>
<p>In this article, I have provided you with a comprehensive guide on how to download, install, and use Dolphin Emulator Android 6.0 APK on your Android device. I have also answered some frequently asked questions and shared some user reviews about this emulator. I hope this article has been helpful and informative for you.</p>
<p>If you have any questions, comments, feedback, or suggestions about this article or Dolphin Emulator Android 6.0 APK, please feel free to leave them below. I would love to hear from you and help you out. Thank you for reading and happy gaming!</p>
|
177 |
-
<h1>Frequently Asked Questions</h1>
<p>Here are some of the most frequently asked questions about Dolphin Emulator Android 6.0 APK:</p>
<h2>Is Dolphin Emulator Android 6.0 APK legal?</h2>
<p>Dolphin Emulator Android 6.0 APK is legal as long as you use it for personal and non-commercial purposes. Dolphin Emulator is free and open-source software that does not violate any intellectual property rights or laws. However, the games that you play on Dolphin Emulator may be subject to copyright and licensing restrictions. You should only play games that you own legally from your own discs or backups. You should not download, share, or distribute games that you do not own or have permission to use.</p>
<h2>Is Dolphin Emulator Android 6.0 APK safe?</h2>
<p>Dolphin Emulator Android 6.0 APK is safe as long as you download it from a reliable and reputable source, such as the official website. You should verify the integrity and safety of the downloaded file before installing it on your device, and scan it with a reputable antivirus or anti-malware program to detect any malicious code or modification hidden in the file. You may also need to grant the permissions that Dolphin Emulator requires to access certain features and functions of your device.</p>
<h2>Is Dolphin Emulator Android 6.0 APK compatible with my device?</h2>
<p>Dolphin Emulator Android 6.0 APK is compatible with most Android devices that run on Android 5.0 (Lollipop) or higher and have a 64-bit processor (ARMv8 or x86_64). However, some devices may not be able to run Dolphin Emulator or some games smoothly or at all due to their hardware limitations or software issues. You should check your device's specifications and compare them with the minimum system requirements for Dolphin Emulator and the game you want to play. You should also check the compatibility list on the official website or the game wiki to see if your device and game are compatible with Dolphin Emulator.</p>
<h2>How can I improve the performance of Dolphin Emulator Android 6.0 APK?</h2>
<p>You can improve the performance of Dolphin Emulator Android 6.0 APK by following these tips:</p>
|
187 |
-
<ul>
<li>Use a powerful device that can handle the emulation workload.</li>
<li>Close any other apps or processes that may be running in the background and consuming your device's resources.</li>
<li>Clean your device's cache and memory using a cleaning app or tool.</li>
<li>Restart your device and launch Dolphin Emulator again.</li>
<li>Lower or disable some of the graphics and audio settings that may be taxing your device's capabilities.</li>
<li>Update Dolphin Emulator to the latest version available.</li>
<li>Update your device's software and drivers to the latest version available.</li>
<li>Try a different version or build of Dolphin Emulator or the game you are trying to play.</li>
<li>Try a different game file format or compression method.</li>
<li>Report any errors or issues to the developers or support team of Dolphin Emulator.</li>
</ul>
|
199 |
-
<h2>How can I get more games for Dolphin Emulator Android 6.0 APK?</h2>
<p>You can get more games for Dolphin Emulator Android 6.0 APK by following these steps:</p>
<ol>
<li>If you have the original GameCube or Wii discs, you can use a disc drive and a software tool, such as CleanRip, RawDump, or FriiDump, to rip the discs and create ISO or WBFS files on your computer. You can also use a modded Wii console and a software tool, such as USB Loader GX, WiiFlow, or CFG USB Loader, to rip the discs and create ISO or WBFS files on a USB drive.</li>
<li>If you have backup copies of GameCube or Wii games, you can use a software tool, such as Wii Backup Manager, Witgui, or Wii Backup Fusion, to convert them into ISO or WBFS files on your computer.</li>
<li>If you want to download GameCube or Wii games from the internet, you can use various websites or sources that offer them legally and safely. However, you should be careful when downloading from these sources, as they may not be trustworthy or safe. Some of them may contain malware, viruses, spyware, adware, or other unwanted programs that can harm your device or compromise your privacy. Some of them may also provide fake or corrupted files that may not work properly or cause errors and issues with your emulator.</li>
<li>Once you have the ISO or WBFS files of the games you want to play, you need to transfer them to your Android device's storage. You can use a USB cable, a microSD card, a cloud service, or a wireless method to do so.</li>
<li>On your Android device, launch Dolphin Emulator and tap on the <strong>Add Folder</strong> button on the top right corner of the screen. This will allow you to browse your device's storage and select the folder where you stored your ISO or WBFS files.</li>
<li>Dolphin Emulator will scan the folder and display all the games that it can recognize in a grid view. You can tap on any game to see more details about it, such as title, region, size, rating, etc.</li>
<li>To load a game, simply tap on its icon and wait for Dolphin Emulator to launch it. You will see a loading screen with some information about the game and the emulator's status.</li>
<li>Once the game is loaded, you can start playing it on your Android device using either touch controls or physical controllers.</li>
</ol>
|
211 |
-
<h1>User Reviews</h1>
<p>Here are some of the user reviews about Dolphin Emulator Android 6.0 APK from various sources:</p>
<h2>Positive Reviews</h2>
<ul>
<li>"This is the best emulator for GameCube and Wii games on Android. It runs smoothly and has many options to customize. I can play my favorite games in HD with no lag or glitches. The touch controls are responsive and easy to use. The controller support is also great. I can connect my PS4 controller via Bluetooth and play wirelessly. The online multiplayer feature is also amazing. I can play with my friends online using Netplay or Wiimmfi. This emulator is a must-have for any Nintendo fan."</li>
<li>"I love this emulator. It allows me to play GameCube and Wii games on my phone that I never got to play before. The graphics are stunning and the sound is clear. The emulation is fast and stable. The settings are easy to understand and adjust. The compatibility list is impressive and growing. The developers are active and responsive. They update the app regularly with new features and bug fixes. They also listen to feedback and suggestions from the users. This emulator is worth every penny."</li>
<li>"This emulator is awesome. It works perfectly on my device and has no issues at all. I can play all the games I want with no problems. The graphics are beautiful and the sound is crisp. The emulation is accurate and faithful. The settings are comprehensive and flexible. The compatibility list is extensive and reliable. The developers are amazing and supportive. They update the app frequently with new features and bug fixes. They also communicate with the users and provide help and guidance. This emulator is a masterpiece."</li>
</ul>
<h2>Negative Reviews</h2>
<ul>
<li>"This emulator is terrible. It does not work on my device and has many issues. I cannot play any games with it because it crashes, freezes, slows down, glitches, or shows errors. The graphics are ugly and the sound is distorted. The emulation is poor and inaccurate. The settings are confusing and limited. The compatibility list is outdated and inaccurate. The developers are lazy and unresponsive. They do not update the app regularly or fix any bugs or issues. They also ignore feedback and complaints from the users. This emulator is a waste of time."</li>
<li>"I hate this emulator. It works poorly on my device and has many problems. I can only play a few games with it because it lags, skips, flickers, or shows errors. The graphics are low-quality and the sound is noisy. The emulation is slow and unstable. The settings are hard to understand and change. The compatibility list is incomplete and unreliable. The developers are rude and unhelpful. They do not update the app often or fix any bugs or issues. They also argue with feedback and suggestions from the users. This emulator is a joke."</li>
<li>"This emulator is disappointing. It works fine on my device but has many limitations. I can play some games with it but not all of them. The graphics are decent but not amazing. The sound is okay but not great. The emulation is fast but not smooth. The settings are simple but not enough. The compatibility list is accurate but not comprehensive. The developers are nice but not supportive. They update the app sometimes but not regularly. They also accept feedback and suggestions from the users but do not implement them. This emulator is a letdown."</li>
</ul>
|
225 |
-
<p>This is the end of the article. I hope you enjoyed reading it and learned something new about Dolphin Emulator Android 6.0 APK. If you did, please share it with your friends and family who may also be interested in this topic. If you have any questions, comments, feedback, or suggestions about this article or Dolphin Emulator Android 6.0 APK, please leave them below. I would love to hear from you and help you out. Thank you for reading and happy gaming!</p>
spaces/1phancelerku/anime-remove-background/Download Age of Conquest IV and Create Your Own Custom Maps and Scenarios.md
DELETED
@@ -1,190 +0,0 @@
<h1>Age of Conquest IV: A Turn-Based Grand Strategy Wargame</h1>
<p>Do you love strategy games that let you command your armies in historical and fictional scenarios? Do you enjoy playing solo or with your friends in cross-platform multiplayer matches? Do you want to create your own custom maps and scenarios with a map editor? If you answered yes to any of these questions, then you might want to check out Age of Conquest IV, a turn-based grand strategy wargame that offers all these features and more.</p>
<h2>age of conquest 4 download</h2><br /><p><b>Download Zip ★★★★★ <a href="https://jinyurl.com/2uNUfM">https://jinyurl.com/2uNUfM</a></b></p>
<h2>What is Age of Conquest IV?</h2>
<p>Age of Conquest IV is a game developed and published by Noble Master LLC, a small indie studio based in Hawaii. It was released in 2016 for Windows, Mac, Linux, Android, iOS, and web browsers. It is the fourth installment in the Age of Conquest series, which started in 2002 as a Java applet game.</p>
<p>Age of Conquest IV is a game that lets you create your own warring experience by choosing from hundreds of factions and maps that span from ancient to modern times. You can play as the Roman Empire, the Inca, France, Russia, Japan, or the Chinese Dynasties, among many others. You can also play on maps that depict Europe, Colonization, Asian Empires, American Wars, World Conquest, and more.</p>
<p>The game is turn-based, meaning that you and your opponents take turns to move your units, build your economy, conduct diplomacy, and wage war. The game has a streamlined user interface that makes it easy to learn and play. You can play against the computer AI, which has different difficulty levels and personalities. You can also play online or locally with other players in cross-platform multiplayer matches. You can form alliances and fight co-op style with the AI and other players for ultimate victory.</p>
<h3>Features of Age of Conquest IV</h3>
<p>Age of Conquest IV has many features that make it a fun and challenging game for strategy lovers. Here are some of them:</p>
<h4>Ancient to Modern</h4>
<p>The game offers a variety of map scenarios that cover different time periods and regions of the world. You can play on historical maps that depict real events and conflicts, such as the Rise of Rome, the Hundred Years' War, the Napoleonic Wars, or the Cold War. You can also play on fictional maps that imagine alternative scenarios or fantasy worlds, such as Middle Earth, Westeros, or Atlantis.</p>
<h4>Diplomacy & Economy</h4>
<p>The game also features a diplomacy and economy system that adds depth and realism to the gameplay. You can negotiate with other factions for peace, trade, alliances, or war. You can also manage your population, happiness, and taxes in each province. You have to balance your income and expenses, as well as deal with rebellions and revolts if your people are unhappy.</p>
<h4>Single & Multiplayer</h4>
<p>The game allows you to play solo or with others in various modes. You can play skirmish matches against the AI or hotseat with friends and family on the same device. You can also play online with other players from around the world in cross-platform multiplayer matches. The game has a ranking and rating system that tracks your performance and skill level. You can also chat with other players and join clans for more social interaction.</p>
<h4>Modding</h4>
<p>The game also supports modding, which means that you can create your own custom maps and scenarios with a map editor. You can use the built-in tools to design your own terrain, provinces, factions, and units. You can also import and export your maps and share them with other players. You can also download and play maps created by other players from the online map store. You can rate and comment on the maps you play and give feedback to the creators.</p>
<h3>How to Download Age of Conquest IV?</h3>
<p>If you are interested in playing Age of Conquest IV, you have several options to download the game. Here are some of them:</p>
|
21 |
-
<p>age of conquest 4 free download<br />
age of conquest 4 steam download<br />
age of conquest 4 download for pc<br />
age of conquest 4 download for android<br />
age of conquest 4 download for ios<br />
age of conquest 4 download for mac<br />
age of conquest 4 download for linux<br />
age of conquest 4 download maps<br />
age of conquest 4 download modding<br />
age of conquest 4 browser download<br />
age of conquest 4 full version download<br />
age of conquest 4 all maps download<br />
age of conquest 4 portable download<br />
age of conquest 4 bundle download<br />
age of conquest 4 generic download<br />
age of conquest 4 legacy download<br />
age of conquest 4 install download<br />
age of conquest 4 update download<br />
age of conquest 4 patch download<br />
age of conquest 4 online download<br />
age of conquest 4 offline download<br />
age of conquest 4 turn based strategy game download<br />
age of conquest 4 grand strategy wargame download<br />
age of conquest 4 roman empire download<br />
age of conquest 4 inca download<br />
age of conquest 4 france download<br />
age of conquest 4 russia download<br />
age of conquest 4 japan download<br />
age of conquest 4 china download<br />
age of conquest 4 europe map download<br />
age of conquest 4 colonization map download<br />
age of conquest 4 asian empires map download<br />
age of conquest 4 american wars map download<br />
age of conquest 4 world conquest map download<br />
age of conquest 4 diplomacy and economy game download<br />
age of conquest 4 single player game download<br />
age of conquest 4 multiplayer game download<br />
age of conquest 4 co-op game download<br />
age of conquest 4 cross-platform game download<br />
age of conquest 4 ranking and rating game download<br />
age of conquest 4 skirmish mode game download<br />
age of conquest 4 hotseat mode game download<br />
age of conquest 4 modding tool game download<br />
age of conquest 4 map editor game download<br />
age of conquest 4 custom maps game download<br />
age of conquest iv free to play game download</p>
|
67 |
-
<h4>Direct Downloads</h4>
<p>You can download the game directly from the official website of Noble Master LLC. The website offers downloads for Windows, Mac, Linux, Android, and iOS devices. You can also play the game online on your web browser without downloading anything. The direct downloads are free, but they have some limitations, such as fewer maps and factions, and no multiplayer mode. You can unlock the full version of the game by purchasing a license key for $4.99 USD.</p>
<h4>3rd Party Downloads</h4>
<p>You can also download the game from 3rd party platforms, such as Steam, Google Play, App Store, or Amazon. These platforms offer the full version of the game for a similar price as the direct downloads. You can also enjoy some additional features, such as achievements, leaderboards, cloud saves, and more. However, you may need to create an account and install additional software to use these platforms.</p>
<h3>What are the System Requirements for Age of Conquest IV?</h3>
<p>Age of Conquest IV is a relatively low-spec game that can run on most devices. However, you may still want to check the system requirements before downloading the game to ensure a smooth gameplay experience. Here are the minimum and recommended requirements for the game:</p>
<h4>Minimum Requirements</h4>
|
74 |
-
<table>
<tr>
<th>OS</th>
<th>CPU</th>
<th>RAM</th>
<th>Graphics</th>
<th>Storage</th>
</tr>
<tr>
<td>Windows XP or later</td>
<td>1 GHz single-core processor</td>
<td>512 MB</td>
<td>OpenGL 2.0 compatible with 128 MB VRAM</td>
<td>150 MB</td>
</tr>
<tr>
<td>Mac OS X 10.7 or later</td>
<td>1 GHz single-core processor</td>
<td>512 MB</td>
<td>OpenGL 2.0 compatible with 128 MB VRAM</td>
<td>150 MB</td>
</tr>
<tr>
<td>Linux (Ubuntu 12.04 or later)</td>
<td>1 GHz single-core processor</td>
<td>512 MB</td>
<td>OpenGL 2.0 compatible with 128 MB VRAM</td>
<td>150 MB</td>
</tr>
<tr>
<td>Android 4.0 or later</td>
<td>1 GHz single-core processor</td>
<td>512 MB</td>
<td>N/A</td>
<td>N/A</td>
</tr>
<tr>
<td>iOS 8.0 or later</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
</tr>
<tr>
<td>Web Browser (Chrome, Firefox, Safari, Edge)</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
</tr>
</table>
|
124 |
-
<h4>Recommended Requirements</h4>
<table>
<tr>
<th>OS</th>
<th>CPU</th>
<th>RAM</th>
<th>Graphics</th>
<th>Storage</th>
</tr>
<tr>
<td>Windows 7 or later</td>
<td>2 GHz dual-core processor</td>
<td>1 GB</td>
<td>OpenGL 2.0 compatible with 256 MB VRAM</td>
<td>300 MB</td>
</tr>
<tr>
<td>Mac OS X 10.10 or later</td>
<td>2 GHz dual-core processor</td>
<td>1 GB</td>
<td>OpenGL 2.0 compatible with 256 MB VRAM</td>
<td>300 MB</td>
</tr>
<tr>
<td>Linux (Ubuntu 14.04 or later)</td>
<td>2 GHz dual-core processor</td>
<td>1 GB</td>
<td>OpenGL 2.0 compatible with 256 MB VRAM</td>
<td>300 MB</td>
</tr>
<tr>
<td>Android 5.0 or later</td>
<td>2 GHz dual-core processor</td>
<td>1 GB</td>
<td>N/A</td>
<td>N/A</td>
</tr>
<tr>
<td>iOS 10.0 or later</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
<td>N/A</td>
</tr>
</table>
|
167 |
-
<h3>How to Play Age of Conquest IV?</h3>
<p>If you have downloaded and installed Age of Conquest IV, you may be wondering how to play the game. Here are some steps to help you get started:</p>
<h4>Tutorial</h4>
<p>The game has a tutorial mode that teaches you the basics of the game, such as how to move your units, build your economy, conduct diplomacy, and wage war. The tutorial mode consists of several missions that guide you through different aspects of the game. You can access the tutorial mode from the main menu by clicking on the "Tutorial" button. You can also watch video tutorials on the official website or YouTube channel of Noble Master LLC.</p>
<h4>Tips and Tricks</h4>
<p>The game also has a tips and tricks section that gives you some useful advice and information on how to play the game better. You can access the tips and tricks section from the main menu by clicking on the "Tips & Tricks" button. You can also find more tips and tricks on the official forum or wiki of Noble Master LLC.</p>
<h2>Conclusion</h2>
<p>Age of Conquest IV is a turn-based grand strategy wargame that lets you create your own warring experience by choosing from hundreds of factions and maps that span from ancient to modern times. You can play solo or with others in cross-platform multiplayer matches. You can also create your own custom maps and scenarios with a map editor. The game is easy to learn and play, but challenging and rewarding to master. If you are a fan of strategy games, you should definitely give Age of Conquest IV a try.</p>
<p>If you have any questions or feedback about the game, you can contact Noble Master LLC through their official website, email, or social media accounts. You can also join their community of players and modders on their forum, wiki, discord, or reddit.</p>
|
176 |
-
<h3>Frequently Asked Questions (FAQs)</h3>
<p>Here are some common questions and answers about Age of Conquest IV:</p>
<ol>
<li><b>Is Age of Conquest IV free?</b></li>
<p>The game is free to download and play, but it has some limitations, such as fewer maps and factions, and no multiplayer mode. You can unlock the full version of the game by purchasing a license key for $4.99 USD.</p>
<li><b>Is Age of Conquest IV online?</b></li>
<p>The game has an online mode that allows you to play with other players from around the world in cross-platform multiplayer matches. You need an internet connection and an account to play online.</p>
<li><b>Is Age of Conquest IV offline?</b></li>
<p>The game has an offline mode that allows you to play solo or hotseat with friends and family on the same device. You do not need an internet connection or an account to play offline.</p>
<li><b>Is Age of Conquest IV historical?</b></li>
<p>The game has historical maps that depict real events and conflicts, such as the Rise of Rome, the Hundred Years' War, the Napoleonic Wars, or the Cold War. The game also has fictional maps that imagine alternative scenarios or fantasy worlds, such as Middle Earth, Westeros, or Atlantis.</p>
<li><b>Is Age of Conquest IV realistic?</b></li>
<p>The game is not meant to be a realistic simulation of history or warfare, but rather a fun and challenging strategy game that offers a variety of map scenarios and gameplay options. The game does not aim to be historically accurate or politically correct, but rather to provide an enjoyable, creative, and diverse warring experience.</p>
</ol>
spaces/1toTree/lora_test/env.py
DELETED
@@ -1,13 +0,0 @@
############################################################################################################################
# Modify the parameters below.
# (1) BASE_MODEL_NAME is the base model your LoRA was trained on.
BASE_MODEL_NAME = "runwayml/stable-diffusion-v1-5"

# Whether to enable LoRA:
# (2) LORA_WEIGHTS_PATH is the LoRA weight repo you uploaded to Hugging Face.
# LORA_WEIGHTS_PATH = None means no LoRA is applied.
LORA_WEIGHTS_PATH = "1toTree/demo_test"

# (3) PROMPTS is the prompt text to showcase.
PROMPTS = "cartoon face"
############################################################################################################################
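A minimal sketch of how these three settings might be consumed at inference time. It assumes the Hugging Face diffusers API with LoRA support; the space itself may use a different pipeline library, so treat the calls as illustrative rather than as this repo's actual code:

# Hypothetical consumer of env.py (not taken from this space's own code);
# assumes a recent diffusers release with load_lora_weights support.
import torch
from diffusers import StableDiffusionPipeline

from env import BASE_MODEL_NAME, LORA_WEIGHTS_PATH, PROMPTS

pipe = StableDiffusionPipeline.from_pretrained(BASE_MODEL_NAME, torch_dtype=torch.float16)
if LORA_WEIGHTS_PATH is not None:
    pipe.load_lora_weights(LORA_WEIGHTS_PATH)  # fetches the LoRA repo from the Hub
image = pipe(PROMPTS).images[0]
image.save("sample.png")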
spaces/2023Liu2023/bingo/src/components/chat-panel.tsx
DELETED
@@ -1,153 +0,0 @@
'use client'

import * as React from 'react'
import Image from 'next/image'
import Textarea from 'react-textarea-autosize'
import { useAtomValue } from 'jotai'
import { useEnterSubmit } from '@/lib/hooks/use-enter-submit'
import { cn } from '@/lib/utils'

import BrushIcon from '@/assets/images/brush.svg'
import ChatIcon from '@/assets/images/chat.svg'
import VisualSearchIcon from '@/assets/images/visual-search.svg'
import SendIcon from '@/assets/images/send.svg'
import PinIcon from '@/assets/images/pin.svg'
import PinFillIcon from '@/assets/images/pin-fill.svg'

import { useBing } from '@/lib/hooks/use-bing'
import { voiceListenAtom } from '@/state'
import Voice from './voice'
import { ChatImage } from './chat-image'
import { ChatAttachments } from './chat-attachments'

export interface ChatPanelProps
  extends Pick<
    ReturnType<typeof useBing>,
    | 'generating'
    | 'input'
    | 'setInput'
    | 'sendMessage'
    | 'resetConversation'
    | 'isSpeaking'
    | 'attachmentList'
    | 'uploadImage'
    | 'setAttachmentList'
  > {
  id?: string
  className?: string
}

// Composer bar for the chat UI: textarea with enter-to-send, image upload,
// voice input, and a pin toggle that keeps the panel expanded.
export function ChatPanel({
  isSpeaking,
  generating,
  input,
  setInput,
  className,
  sendMessage,
  resetConversation,
  attachmentList,
  uploadImage,
  setAttachmentList
}: ChatPanelProps) {
  const inputRef = React.useRef<HTMLTextAreaElement>(null)
  const { formRef, onKeyDown } = useEnterSubmit()
  const [focused, setFocused] = React.useState(false)
  const [active, setActive] = React.useState(false)
  const [pin, setPin] = React.useState(false)
  const [tid, setTid] = React.useState<any>()
  const voiceListening = useAtomValue(voiceListenAtom)

  // Collapse the panel shortly after focus leaves it; the timeout id is kept
  // so a quick refocus can cancel the pending collapse.
  const setBlur = React.useCallback(() => {
    clearTimeout(tid)
    setActive(false)
    const _tid = setTimeout(() => setFocused(false), 2000);
    setTid(_tid)
  }, [tid])

  const setFocus = React.useCallback(() => {
    setFocused(true)
    setActive(true)
    clearTimeout(tid)
    inputRef.current?.focus()
  }, [tid])

  React.useEffect(() => {
    if (input) {
      setFocus()
    }
  }, [input])

  return (
    <form
      className={cn('chat-panel', className)}
      onSubmit={async e => {
        e.preventDefault()
        if (generating) {
          return;
        }
        if (!input?.trim()) {
          return
        }
        setInput('')
        setPin(false)
        await sendMessage(input)
      }}
      ref={formRef}
    >
      <div className="action-bar pb-4">
        <div className={cn('action-root', { focus: active || pin })} speech-state="hidden" visual-search="" drop-target="">
          <div className="fade bottom">
            <div className="background"></div>
          </div>
          <div className={cn('outside-left-container', { collapsed: focused })}>
            <div className="button-compose-wrapper">
              <button className="body-2 button-compose" type="button" aria-label="新主题" onClick={resetConversation}>
                <div className="button-compose-content">
                  <Image className="pl-2" alt="brush" src={BrushIcon} width={40} />
                  <div className="button-compose-text">新主题</div>
                </div>
              </button>
            </div>
          </div>
          <div
            className={cn('main-container', { active: active || pin })}
            style={{ minHeight: pin ? '360px' : undefined }}
            onClick={setFocus}
            onBlur={setBlur}
          >
            <div className="main-bar">
              <Image alt="chat" src={ChatIcon} width={20} color="blue" />
              <Textarea
                ref={inputRef}
                tabIndex={0}
                onKeyDown={onKeyDown}
                rows={1}
                value={input}
                onChange={e => setInput(e.target.value.slice(0, 4000))}
                placeholder={voiceListening ? '持续对话中...对话完成说“发送”即可' : 'Shift + Enter 换行'}
                spellCheck={false}
                className="message-input min-h-[24px] -mx-1 w-full text-base resize-none bg-transparent focus-within:outline-none"
              />
              <ChatImage uploadImage={uploadImage}>
                <Image alt="visual-search" src={VisualSearchIcon} width={24} />
              </ChatImage>
              <Voice setInput={setInput} sendMessage={sendMessage} isSpeaking={isSpeaking} input={input} />
              <button type="submit">
                <Image alt="send" src={SendIcon} width={20} style={{ marginTop: '2px' }} />
              </button>
            </div>
            <ChatAttachments attachmentList={attachmentList} setAttachmentList={setAttachmentList} uploadImage={uploadImage} />
            <div className="body-1 bottom-bar">
              <div className="letter-counter"><span>{input.length}</span>/4000</div>
              <button onClick={() => {
                setPin(!pin)
              }} className="pr-2">
                <Image alt="pin" src={pin ? PinFillIcon : PinIcon} width={20} />
              </button>
            </div>
          </div>
        </div>
      </div>
    </form>
  )
}
spaces/232labs/VToonify/vtoonify/model/stylegan/op/readme.md
DELETED
@@ -1,12 +0,0 @@
Code from [rosinality-stylegan2-pytorch-cp](https://github.com/senior-sigan/rosinality-stylegan2-pytorch-cpu)

Scripts to convert rosinality/stylegan2-pytorch to the CPU compatible format

If you would like to use CPU for testing or have a problem regarding the cpp extension (fused and upfirdn2d), please make the following changes:

Change `model.stylegan.op` to `model.stylegan.op_cpu`
https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/util.py#L14

https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/simple_augment.py#L12

https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/stylegan/model.py#L11
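For concreteness, the edit at each of the three linked lines looks roughly like the sketch below; the imported names are assumptions based on the usual rosinality op module, so check the actual import statements at the links above:

# Before (requires the compiled C++/CUDA extension):
# from model.stylegan.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d

# After (pure-PyTorch fallback, CPU compatible):
from model.stylegan.op_cpu import FusedLeakyReLU, fused_leaky_relu, upfirdn2d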
spaces/2ndelement/voicevox/voicevox_engine/downloadable_library.py
DELETED
@@ -1,86 +0,0 @@
import base64
import json
import zipfile
from io import BytesIO
from pathlib import Path
from typing import List

from fastapi import HTTPException

from voicevox_engine.model import DownloadableLibrary

__all__ = ["LibraryManager"]

INFO_FILE = "metas.json"


class LibraryManager:
    def __init__(self, library_root_dir: Path):
        self.library_root_dir = library_root_dir
        self.library_root_dir.mkdir(exist_ok=True)

    def downloadable_libraries(self):
        # == If the download info is fetched over the network:
        # url = "https://example.com/downloadable_libraries.json"
        # response = requests.get(url)
        # return list(map(DownloadableLibrary.parse_obj, response.json()))

        # == If the download info is read from a json file:
        # with open(
        #     self.root_dir / "engine_manifest_assets" / "downloadable_libraries.json",
        #     encoding="utf-8",
        # ) as f:
        #     return list(map(DownloadableLibrary.parse_obj, json.load(f)))

        # As a dummy, load the speaker_info assets.
        with open(
            "./engine_manifest_assets/downloadable_libraries.json",
            encoding="utf-8",
        ) as f:
            libraries = json.load(f)
            speaker_info = libraries[0]["speakers"][0]["speaker_info"]
            mock_root_dir = Path("./speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff")
            speaker_info["policy"] = (mock_root_dir / "policy.md").read_text()
            speaker_info["portrait"] = base64.b64encode(
                (mock_root_dir / "portrait.png").read_bytes()
            )
            for style_info in speaker_info["style_infos"]:
                style_id = style_info["id"]
                style_info["icon"] = base64.b64encode(
                    (mock_root_dir / "icons" / f"{style_id}.png").read_bytes()
                )
                style_info["voice_samples"] = [
                    base64.b64encode(
                        (
                            mock_root_dir / "voice_samples" / f"{style_id}_{i:0>3}.wav"
                        ).read_bytes()
                    )
                    for i in range(1, 4)
                ]
        return list(map(DownloadableLibrary.parse_obj, libraries))

    def installed_libraries(self) -> List[DownloadableLibrary]:
        # NOTE: entries are appended as parsed dicts from each metas.json,
        # not as DownloadableLibrary instances, despite the annotation.
        library = []
        for library_dir in self.library_root_dir.iterdir():
            if library_dir.is_dir():
                with open(library_dir / INFO_FILE, encoding="utf-8") as f:
                    library.append(json.load(f))
        return library

    def install_library(self, library_id: str, file: BytesIO):
        for downloadable_library in self.downloadable_libraries():
            if downloadable_library.uuid == library_id:
                library_info = downloadable_library.dict()
                break
        else:
            raise HTTPException(status_code=404, detail="指定された音声ライブラリが見つかりません。")  # "The specified voice library was not found."
        library_dir = self.library_root_dir / library_id
        library_dir.mkdir(exist_ok=True)
        with open(library_dir / INFO_FILE, "w", encoding="utf-8") as f:
            json.dump(library_info, f, indent=4, ensure_ascii=False)
        with zipfile.ZipFile(file) as zf:
            if zf.testzip() is not None:
                raise HTTPException(status_code=422, detail="不正なZIPファイルです。")  # "Invalid ZIP file."

            zf.extractall(library_dir)
        return library_dir
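A small usage sketch for LibraryManager (it assumes running inside a voicevox_engine checkout so the imports resolve; the library UUID and zip path below are hypothetical):

from io import BytesIO
from pathlib import Path

manager = LibraryManager(Path("./libraries"))

# Enumerate libraries offered for download and those already installed.
for lib in manager.downloadable_libraries():
    print(lib.uuid)
print(manager.installed_libraries())

# Install a library from a zip archive held in memory.
with open("some_library.zip", "rb") as f:  # hypothetical archive
    manager.install_library("7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", BytesIO(f.read()))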
spaces/777DUKE/Ballin/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: Ballin
emoji: 🐠
colorFrom: green
colorTo: indigo
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/msd.py
DELETED
@@ -1,126 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import typing as tp

import numpy as np
import torch
import torch.nn as nn

from ...modules import NormConv1d
from .base import MultiDiscriminator, MultiDiscriminatorOutputType


class ScaleDiscriminator(nn.Module):
    """Waveform sub-discriminator.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_sizes (Sequence[int]): Kernel sizes for first and last convolutions.
        filters (int): Number of initial filters for convolutions.
        max_filters (int): Maximum number of filters.
        downsample_scales (Sequence[int]): Scale for downsampling implemented as strided convolutions.
        inner_kernel_sizes (Sequence[int] or None): Kernel sizes for inner convolutions.
        groups (Sequence[int] or None): Groups for inner convolutions.
        strides (Sequence[int] or None): Strides for inner convolutions.
        paddings (Sequence[int] or None): Paddings for inner convolutions.
        norm (str): Normalization method.
        activation (str): Activation function.
        activation_params (dict): Parameters to provide to the activation function.
        pad (str): Padding for initial convolution.
        pad_params (dict): Parameters to provide to the padding module.
    """
    def __init__(self, in_channels=1, out_channels=1, kernel_sizes: tp.Sequence[int] = [5, 3],
                 filters: int = 16, max_filters: int = 1024, downsample_scales: tp.Sequence[int] = [4, 4, 4, 4],
                 inner_kernel_sizes: tp.Optional[tp.Sequence[int]] = None, groups: tp.Optional[tp.Sequence[int]] = None,
                 strides: tp.Optional[tp.Sequence[int]] = None, paddings: tp.Optional[tp.Sequence[int]] = None,
                 norm: str = 'weight_norm', activation: str = 'LeakyReLU',
                 activation_params: dict = {'negative_slope': 0.2}, pad: str = 'ReflectionPad1d',
                 pad_params: dict = {}):
        super().__init__()
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1
        assert (inner_kernel_sizes is None or len(inner_kernel_sizes) == len(downsample_scales))
        assert (groups is None or len(groups) == len(downsample_scales))
        assert (strides is None or len(strides) == len(downsample_scales))
        assert (paddings is None or len(paddings) == len(downsample_scales))
        self.activation = getattr(torch.nn, activation)(**activation_params)
        self.convs = nn.ModuleList()
        self.convs.append(
            nn.Sequential(
                getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
                NormConv1d(in_channels, filters, kernel_size=np.prod(kernel_sizes), stride=1, norm=norm)
            )
        )

        in_chs = filters
        for i, downsample_scale in enumerate(downsample_scales):
            out_chs = min(in_chs * downsample_scale, max_filters)
            default_kernel_size = downsample_scale * 10 + 1
            default_stride = downsample_scale
            default_padding = (default_kernel_size - 1) // 2
            default_groups = in_chs // 4
            self.convs.append(
                NormConv1d(in_chs, out_chs,
                           kernel_size=inner_kernel_sizes[i] if inner_kernel_sizes else default_kernel_size,
                           stride=strides[i] if strides else default_stride,
                           groups=groups[i] if groups else default_groups,
                           padding=paddings[i] if paddings else default_padding,
                           norm=norm))
            in_chs = out_chs

        out_chs = min(in_chs * 2, max_filters)
        self.convs.append(NormConv1d(in_chs, out_chs, kernel_size=kernel_sizes[0], stride=1,
                                     padding=(kernel_sizes[0] - 1) // 2, norm=norm))
        self.conv_post = NormConv1d(out_chs, out_channels, kernel_size=kernel_sizes[1], stride=1,
                                    padding=(kernel_sizes[1] - 1) // 2, norm=norm)

    def forward(self, x: torch.Tensor):
        fmap = []
        for layer in self.convs:
            x = layer(x)
            x = self.activation(x)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        # x = torch.flatten(x, 1, -1)
        return x, fmap


class MultiScaleDiscriminator(MultiDiscriminator):
    """Multi-Scale (MSD) Discriminator.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        downsample_factor (int): Downsampling factor between the different scales.
        scale_norms (Sequence[str]): Normalization for each sub-discriminator.
        **kwargs: Additional args for ScaleDiscriminator.
    """
    def __init__(self, in_channels: int = 1, out_channels: int = 1, downsample_factor: int = 2,
                 scale_norms: tp.Sequence[str] = ['weight_norm', 'weight_norm', 'weight_norm'], **kwargs):
        super().__init__()
        self.discriminators = nn.ModuleList([
            ScaleDiscriminator(in_channels, out_channels, norm=norm, **kwargs) for norm in scale_norms
        ])
        self.downsample = nn.AvgPool1d(downsample_factor * 2, downsample_factor, padding=downsample_factor)

    @property
    def num_discriminators(self):
        return len(self.discriminators)

    def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
        logits = []
        fmaps = []
        for i, disc in enumerate(self.discriminators):
            if i != 0:
                # Fix: assign the pooled result; the original discarded it, so
                # every sub-discriminator saw the input at the same scale.
                x = self.downsample(x)
            logit, fmap = disc(x)
            logits.append(logit)
            fmaps.append(fmap)
        return logits, fmaps
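A quick shape-check sketch for the discriminator above (it assumes audiocraft is installed so the relative imports resolve; batch size and sample count are arbitrary):

import torch
from audiocraft.adversarial.discriminators.msd import MultiScaleDiscriminator

msd = MultiScaleDiscriminator()
wav = torch.randn(4, 1, 16000)  # (batch, channels, samples)
logits, fmaps = msd(wav)
for logit in logits:
    print(logit.shape)  # one logit map per scale; later scales see avg-pooled audio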
spaces/AIConsultant/MusicGen/audiocraft/optim/cosine_lr_scheduler.py
DELETED
@@ -1,48 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math

from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler


class CosineLRScheduler(_LRScheduler):
    """Cosine LR scheduler.

    Args:
        optimizer (Optimizer): Torch optimizer.
        warmup_steps (int): Number of warmup steps.
        total_steps (int): Total number of steps.
        lr_min_ratio (float): Minimum learning rate, as a ratio of the base learning rate.
        cycle_length (float): Cycle length.
    """
    def __init__(self, optimizer: Optimizer, total_steps: int, warmup_steps: int,
                 lr_min_ratio: float = 0.0, cycle_length: float = 1.0):
        self.warmup_steps = warmup_steps
        assert self.warmup_steps >= 0
        self.total_steps = total_steps
        assert self.total_steps >= 0
        self.lr_min_ratio = lr_min_ratio
        self.cycle_length = cycle_length
        super().__init__(optimizer)

    def _get_sched_lr(self, lr: float, step: int):
        if step < self.warmup_steps:
            lr_ratio = step / self.warmup_steps
            lr = lr_ratio * lr
        elif step <= self.total_steps:
            s = (step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
            lr_ratio = self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * \
                (1. + math.cos(math.pi * s / self.cycle_length))
            lr = lr_ratio * lr
        else:
            lr_ratio = self.lr_min_ratio
            lr = lr_ratio * lr
        return lr

    def get_lr(self):
        return [self._get_sched_lr(lr, self.last_epoch) for lr in self.base_lrs]
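A minimal driving loop for the scheduler (standard PyTorch `_LRScheduler` usage; the model and step counts are placeholders):

import torch
from audiocraft.optim.cosine_lr_scheduler import CosineLRScheduler

model = torch.nn.Linear(8, 8)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
sched = CosineLRScheduler(opt, total_steps=1000, warmup_steps=100)

for step in range(1000):
    opt.step()    # the actual training step is elided
    sched.step()  # linear warmup over 100 steps, then cosine decay toward lr_min_ratio * base_lr
print(sched.get_last_lr())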
spaces/AONYLMR/White-box-Cartoonization/README.md
DELETED
@@ -1,15 +0,0 @@
---
python_version: 3.7
title: White Box Cartoonization
emoji: 📚
colorFrom: purple
colorTo: green
sdk: gradio
sdk_version: 2.9.4
app_file: app.py
pinned: false
license: apache-2.0
duplicated_from: hylee/White-box-Cartoonization
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/trainer.py
DELETED
@@ -1,447 +0,0 @@
import os
from typing import Dict

from diacritization_evaluation import der, wer
import torch
from torch import nn
from torch import optim
from torch.cuda.amp import autocast
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
from tqdm import trange

from .config_manager import ConfigManager
from dataset import load_iterators
from diacritizer import CBHGDiacritizer, Seq2SeqDiacritizer, GPTDiacritizer
from poetry_diacritizer.util.learning_rates import LearningRateDecay
from poetry_diacritizer.options import OptimizerType
from poetry_diacritizer.util.utils import (
    categorical_accuracy,
    count_parameters,
    initialize_weights,
    plot_alignment,
    repeater,
)

import wandb

wandb.login()


class Trainer:
    def run(self):
        raise NotImplementedError


class GeneralTrainer(Trainer):
    def __init__(self, config_path: str, model_kind: str, model_desc: str) -> None:
        self.config_path = config_path
        self.model_kind = model_kind
        self.config_manager = ConfigManager(
            config_path=config_path, model_kind=model_kind
        )
        self.config = self.config_manager.config
        self.losses = []
        self.lr = 0
        self.pad_idx = 0
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
        self.set_device()

        self.config_manager.create_remove_dirs()
        self.text_encoder = self.config_manager.text_encoder
        self.start_symbol_id = self.text_encoder.start_symbol_id
        self.summary_manager = SummaryWriter(log_dir=self.config_manager.log_dir)
        if model_desc == "":
            model_desc = self.model_kind
        wandb.init(project="diacratization", name=model_desc, config=self.config)
        self.model = self.config_manager.get_model()

        self.optimizer = self.get_optimizer()
        self.model = self.model.to(self.device)

        self.load_model(model_path=self.config.get("train_resume_model_path"))
        self.load_diacritizer()

        self.initialize_model()

        self.print_config()

    def set_device(self):
        if self.config.get("device"):
            self.device = self.config["device"]
        else:
            self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def print_config(self):
        self.config_manager.dump_config()
        self.config_manager.print_config()

        if self.global_step > 1:
            print(f"loaded from {self.global_step}")

        parameters_count = count_parameters(self.model)
        print(f"The model has {parameters_count} trainable parameters")

    def load_diacritizer(self):
        if self.model_kind in ["cbhg", "baseline"]:
            self.diacritizer = CBHGDiacritizer(self.config_path, self.model_kind)
        elif self.model_kind in ["seq2seq", "tacotron_based"]:
            self.diacritizer = Seq2SeqDiacritizer(self.config_path, self.model_kind)
        elif self.model_kind in ["gpt"]:
            self.diacritizer = GPTDiacritizer(self.config_path, self.model_kind)

    def initialize_model(self):
        if self.global_step > 1:
            return
        if self.model_kind == "transformer":
            print("Initializing using xavier_uniform_")
            self.model.apply(initialize_weights)

    def print_losses(self, step_results, tqdm):
        self.summary_manager.add_scalar(
            "loss/loss", step_results["loss"], global_step=self.global_step
        )

        tqdm.display(f"loss: {step_results['loss']}", pos=3)
        for pos, n_steps in enumerate(self.config["n_steps_avg_losses"]):
            if len(self.losses) > n_steps:
                self.summary_manager.add_scalar(
                    f"loss/loss-{n_steps}",
                    sum(self.losses[-n_steps:]) / n_steps,
                    global_step=self.global_step,
                )
                tqdm.display(
                    f"{n_steps}-steps average loss: {sum(self.losses[-n_steps:]) / n_steps}",
                    pos=pos + 4,
                )

    def evaluate(self, iterator, tqdm, use_target=True, log=True):
        epoch_loss = 0
        epoch_acc = 0
        self.model.eval()
        tqdm.set_description(f"Eval: {self.global_step}")
        with torch.no_grad():
            for batch_inputs in iterator:
                batch_inputs["src"] = batch_inputs["src"].to(self.device)
                batch_inputs["lengths"] = batch_inputs["lengths"].to("cpu")
                if use_target:
                    batch_inputs["target"] = batch_inputs["target"].to(self.device)
                else:
                    batch_inputs["target"] = None

                outputs = self.model(
                    src=batch_inputs["src"],
                    target=batch_inputs["target"],
                    lengths=batch_inputs["lengths"],
                )

                predictions = outputs["diacritics"]

                predictions = predictions.view(-1, predictions.shape[-1])
                targets = batch_inputs["target"]
                targets = targets.view(-1)
                loss = self.criterion(predictions, targets.to(self.device))
                acc = categorical_accuracy(
                    predictions, targets.to(self.device), self.pad_idx
                )

                epoch_loss += loss.item()
                epoch_acc += acc.item()
                if log:
                    wandb.log({"evaluate_loss": loss.item(), "evaluate_acc": acc.item()})
                tqdm.update()

        tqdm.reset()
        return epoch_loss / len(iterator), epoch_acc / len(iterator)

    def evaluate_with_error_rates(self, iterator, tqdm, log=True):
        all_orig = []
        all_predicted = []
        results = {}
        self.diacritizer.set_model(self.model)
        evaluated_batches = 0
        tqdm.set_description(f"Calculating DER/WER {self.global_step}: ")
        for i, batch in enumerate(iterator):
            if evaluated_batches > int(self.config["error_rates_n_batches"]):
                break
            # Fix: the counter was never incremented in the original, so the
            # check above could never trigger.
            evaluated_batches += 1

            predicted = self.diacritizer.diacritize_batch(batch)
            all_predicted += predicted
            all_orig += batch["original"]
            if i > self.config["max_eval_batches"]:
                break
            tqdm.update()

        summary_texts = []
        orig_path = os.path.join(self.config_manager.prediction_dir, "original.txt")
        predicted_path = os.path.join(
            self.config_manager.prediction_dir, "predicted.txt"
        )

        table = wandb.Table(columns=["original", "predicted"])
        with open(orig_path, "w", encoding="utf8") as file:
            for sentence in all_orig:
                file.write(f"{sentence}\n")

        with open(predicted_path, "w", encoding="utf8") as file:
            for sentence in all_predicted:
                file.write(f"{sentence}\n")

        for i in range(int(self.config["n_predicted_text_tensorboard"])):
            if i > len(all_predicted):
                break

            summary_texts.append(
                (f"eval-text/{i}", f"{ all_orig[i]} |-> {all_predicted[i]}")
            )
            if i < 10:
                table.add_data(all_orig[i], all_predicted[i])

        if log:
            wandb.log({f"prediction_{self.global_step}": table}, commit=False)

        results["DER"] = der.calculate_der_from_path(orig_path, predicted_path)
        results["DER*"] = der.calculate_der_from_path(
            orig_path, predicted_path, case_ending=False
        )
        results["WER"] = wer.calculate_wer_from_path(orig_path, predicted_path)
        results["WER*"] = wer.calculate_wer_from_path(
            orig_path, predicted_path, case_ending=False
        )
        if log:
            wandb.log(results)
        tqdm.reset()
        return results, summary_texts

    def run(self):
        scaler = torch.cuda.amp.GradScaler()
        train_iterator, _, validation_iterator = load_iterators(self.config_manager)
        print("data loaded")
        print("----------------------------------------------------------")
        tqdm_eval = trange(0, len(validation_iterator), leave=True)
        tqdm_error_rates = trange(0, len(validation_iterator), leave=True)
        tqdm_eval.set_description("Eval")
        tqdm_error_rates.set_description("WER/DER : ")
        tqdm = trange(self.global_step, self.config["max_steps"] + 1, leave=True)

        for batch_inputs in repeater(train_iterator):
            tqdm.set_description(f"Global Step {self.global_step}")
            if self.config["use_decay"]:
                self.lr = self.adjust_learning_rate(
                    self.optimizer, global_step=self.global_step
                )
            self.optimizer.zero_grad()
            if self.device == "cuda" and self.config["use_mixed_precision"]:
                # Mixed-precision path: forward under autocast, then scale the
                # loss, unscale before clipping, and step through the scaler.
                with autocast():
                    step_results = self.run_one_step(batch_inputs)
                scaler.scale(step_results["loss"]).backward()
                scaler.unscale_(self.optimizer)
                if self.config.get("CLIP"):
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.config["CLIP"]
                    )

                scaler.step(self.optimizer)

                scaler.update()
            else:
                step_results = self.run_one_step(batch_inputs)

                loss = step_results["loss"]
                loss.backward()
if self.config.get("CLIP"):
|
254 |
-
torch.nn.utils.clip_grad_norm_(
|
255 |
-
self.model.parameters(), self.config["CLIP"]
|
256 |
-
)
|
257 |
-
self.optimizer.step()
|
258 |
-
|
259 |
-
self.losses.append(step_results["loss"].item())
|
260 |
-
wandb.log({"train_loss": step_results["loss"].item()})
|
261 |
-
|
262 |
-
self.print_losses(step_results, tqdm)
|
263 |
-
|
264 |
-
self.summary_manager.add_scalar(
|
265 |
-
"meta/learning_rate", self.lr, global_step=self.global_step
|
266 |
-
)
|
267 |
-
|
268 |
-
if self.global_step % self.config["model_save_frequency"] == 0:
|
269 |
-
torch.save(
|
270 |
-
{
|
271 |
-
"global_step": self.global_step,
|
272 |
-
"model_state_dict": self.model.state_dict(),
|
273 |
-
"optimizer_state_dict": self.optimizer.state_dict(),
|
274 |
-
},
|
275 |
-
os.path.join(
|
276 |
-
self.config_manager.models_dir,
|
277 |
-
f"{self.global_step}-snapshot.pt",
|
278 |
-
),
|
279 |
-
)
|
280 |
-
|
281 |
-
if self.global_step % self.config["evaluate_frequency"] == 0:
|
282 |
-
loss, acc = self.evaluate(validation_iterator, tqdm_eval)
|
283 |
-
self.summary_manager.add_scalar(
|
284 |
-
"evaluate/loss", loss, global_step=self.global_step
|
285 |
-
)
|
286 |
-
self.summary_manager.add_scalar(
|
287 |
-
"evaluate/acc", acc, global_step=self.global_step
|
288 |
-
)
|
289 |
-
tqdm.display(
|
290 |
-
f"Evaluate {self.global_step}: accuracy, {acc}, loss: {loss}", pos=8
|
291 |
-
)
|
292 |
-
self.model.train()
|
293 |
-
|
294 |
-
if (
|
295 |
-
self.global_step % self.config["evaluate_with_error_rates_frequency"]
|
296 |
-
== 0
|
297 |
-
):
|
298 |
-
error_rates, summery_texts = self.evaluate_with_error_rates(
|
299 |
-
validation_iterator, tqdm_error_rates
|
300 |
-
)
|
301 |
-
if error_rates:
|
302 |
-
WER = error_rates["WER"]
|
303 |
-
DER = error_rates["DER"]
|
304 |
-
DER1 = error_rates["DER*"]
|
305 |
-
WER1 = error_rates["WER*"]
|
306 |
-
|
307 |
-
self.summary_manager.add_scalar(
|
308 |
-
"error_rates/WER",
|
309 |
-
WER / 100,
|
310 |
-
global_step=self.global_step,
|
311 |
-
)
|
312 |
-
self.summary_manager.add_scalar(
|
313 |
-
"error_rates/DER",
|
314 |
-
DER / 100,
|
315 |
-
global_step=self.global_step,
|
316 |
-
)
|
317 |
-
self.summary_manager.add_scalar(
|
318 |
-
"error_rates/DER*",
|
319 |
-
DER1 / 100,
|
320 |
-
global_step=self.global_step,
|
321 |
-
)
|
322 |
-
self.summary_manager.add_scalar(
|
323 |
-
"error_rates/WER*",
|
324 |
-
WER1 / 100,
|
325 |
-
global_step=self.global_step,
|
326 |
-
)
|
327 |
-
|
328 |
-
error_rates = f"DER: {DER}, WER: {WER}, DER*: {DER1}, WER*: {WER1}"
|
329 |
-
tqdm.display(f"WER/DER {self.global_step}: {error_rates}", pos=9)
|
330 |
-
|
331 |
-
for tag, text in summery_texts:
|
332 |
-
self.summary_manager.add_text(tag, text)
|
333 |
-
|
334 |
-
self.model.train()
|
335 |
-
|
336 |
-
if self.global_step % self.config["train_plotting_frequency"] == 0:
|
337 |
-
self.plot_attention(step_results)
|
338 |
-
|
339 |
-
self.report(step_results, tqdm)
|
340 |
-
|
341 |
-
self.global_step += 1
|
342 |
-
if self.global_step > self.config["max_steps"]:
|
343 |
-
print("Training Done.")
|
344 |
-
return
|
345 |
-
|
346 |
-
tqdm.update()
|
347 |
-
|
348 |
-
def run_one_step(self, batch_inputs: Dict[str, torch.Tensor]):
|
349 |
-
batch_inputs["src"] = batch_inputs["src"].to(self.device)
|
350 |
-
batch_inputs["lengths"] = batch_inputs["lengths"].to("cpu")
|
351 |
-
batch_inputs["target"] = batch_inputs["target"].to(self.device)
|
352 |
-
|
353 |
-
outputs = self.model(
|
354 |
-
src=batch_inputs["src"],
|
355 |
-
target=batch_inputs["target"],
|
356 |
-
lengths=batch_inputs["lengths"],
|
357 |
-
)
|
358 |
-
|
359 |
-
predictions = outputs["diacritics"].contiguous()
|
360 |
-
targets = batch_inputs["target"].contiguous()
|
361 |
-
predictions = predictions.view(-1, predictions.shape[-1])
|
362 |
-
targets = targets.view(-1)
|
363 |
-
loss = self.criterion(predictions.to(self.device), targets.to(self.device))
|
364 |
-
outputs.update({"loss": loss})
|
365 |
-
return outputs
|
366 |
-
|
367 |
-
def predict(self, iterator):
|
368 |
-
pass
|
369 |
-
|
370 |
-
def load_model(self, model_path: str = None, load_optimizer: bool = True):
|
371 |
-
with open(
|
372 |
-
self.config_manager.base_dir / f"{self.model_kind}_network.txt", "w"
|
373 |
-
) as file:
|
374 |
-
file.write(str(self.model))
|
375 |
-
|
376 |
-
if model_path is None:
|
377 |
-
last_model_path = self.config_manager.get_last_model_path()
|
378 |
-
if last_model_path is None:
|
379 |
-
self.global_step = 1
|
380 |
-
return
|
381 |
-
else:
|
382 |
-
last_model_path = model_path
|
383 |
-
|
384 |
-
print(f"loading from {last_model_path}")
|
385 |
-
saved_model = torch.load(last_model_path)
|
386 |
-
self.model.load_state_dict(saved_model["model_state_dict"])
|
387 |
-
if load_optimizer:
|
388 |
-
self.optimizer.load_state_dict(saved_model["optimizer_state_dict"])
|
389 |
-
self.global_step = saved_model["global_step"] + 1
|
390 |
-
|
391 |
-
def get_optimizer(self):
|
392 |
-
if self.config["optimizer"] == OptimizerType.Adam:
|
393 |
-
optimizer = optim.Adam(
|
394 |
-
self.model.parameters(),
|
395 |
-
lr=self.config["learning_rate"],
|
396 |
-
betas=(self.config["adam_beta1"], self.config["adam_beta2"]),
|
397 |
-
weight_decay=self.config["weight_decay"],
|
398 |
-
)
|
399 |
-
elif self.config["optimizer"] == OptimizerType.SGD:
|
400 |
-
optimizer = optim.SGD(
|
401 |
-
self.model.parameters(), lr=self.config["learning_rate"], momentum=0.9
|
402 |
-
)
|
403 |
-
else:
|
404 |
-
raise ValueError("Optimizer option is not valid")
|
405 |
-
|
406 |
-
return optimizer
|
407 |
-
|
408 |
-
def get_learning_rate(self):
|
409 |
-
return LearningRateDecay(
|
410 |
-
lr=self.config["learning_rate"],
|
411 |
-
warmup_steps=self.config.get("warmup_steps", 4000.0),
|
412 |
-
)
|
413 |
-
|
414 |
-
def adjust_learning_rate(self, optimizer, global_step):
|
415 |
-
learning_rate = self.get_learning_rate()(global_step=global_step)
|
416 |
-
for param_group in optimizer.param_groups:
|
417 |
-
param_group["lr"] = learning_rate
|
418 |
-
return learning_rate
|
419 |
-
|
420 |
-
def plot_attention(self, results):
|
421 |
-
pass
|
422 |
-
|
423 |
-
def report(self, results, tqdm):
|
424 |
-
pass
|
425 |
-
|
426 |
-
|
427 |
-
class Seq2SeqTrainer(GeneralTrainer):
|
428 |
-
def plot_attention(self, results):
|
429 |
-
plot_alignment(
|
430 |
-
results["attention"][0],
|
431 |
-
str(self.config_manager.plot_dir),
|
432 |
-
self.global_step,
|
433 |
-
)
|
434 |
-
|
435 |
-
self.summary_manager.add_image(
|
436 |
-
"Train/attention",
|
437 |
-
results["attention"][0].unsqueeze(0),
|
438 |
-
global_step=self.global_step,
|
439 |
-
)
|
440 |
-
|
441 |
-
|
442 |
-
class GPTTrainer(GeneralTrainer):
|
443 |
-
pass
|
444 |
-
|
445 |
-
|
446 |
-
class CBHGTrainer(GeneralTrainer):
|
447 |
-
pass
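
The mixed-precision branch of `GeneralTrainer.run` above follows PyTorch's standard `GradScaler` recipe: scale the loss before `backward()`, unscale before gradient clipping, then let the scaler step the optimizer and update its scale factor. A minimal self-contained sketch of that pattern; the model, batch, and hyperparameters here are placeholders, not taken from the trainer:

```python
import torch

# Stand-in model, optimizer, and batch (assumes a CUDA device is available).
model = torch.nn.Linear(16, 4).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.CrossEntropyLoss()
scaler = torch.cuda.amp.GradScaler()

src = torch.randn(8, 16, device="cuda")
target = torch.randint(0, 4, (8,), device="cuda")

optimizer.zero_grad()
with torch.cuda.amp.autocast():           # forward pass in mixed precision
    loss = criterion(model(src), target)
scaler.scale(loss).backward()             # scale the loss to avoid fp16 underflow
scaler.unscale_(optimizer)                # unscale so clipping sees true gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)                    # skips the step if gradients overflowed
scaler.update()                           # adjust the scale factor for the next step
```

One difference worth noting: the documented recipe runs `backward()` and the scaler calls outside the `autocast()` context, while the trainer above keeps them inside. That still executes, but it is not the canonical pattern.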
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/filedropzone/FileDropZone.js
DELETED
@@ -1,2 +0,0 @@
import FileDropZone from '../../../plugins/filedropzone.js';
export default FileDropZone;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/LayoutChildren.js
DELETED
@@ -1,60 +0,0 @@
import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js';
import PreLayoutChild from '../basesizer/utils/PreLayoutChild.js';
import LayoutChild from '../basesizer/utils/LayoutChild.js';
import CheckSize from '../basesizer/utils/CheckSize.js';

var LayoutChildren = function () {
    var child, childConfig, padding;
    var startX = this.innerLeft,
        startY = this.innerTop;
    var innerWidth = this.innerWidth,
        innerHeight = this.innerHeight;
    var x, y, width, height; // Align zone
    var childWidth, childHeight;
    // Layout current page
    var children = this.sizerChildren;
    for (var key in children) {
        child = children[key];
        if (child.rexSizer.hidden) {
            continue;
        }

        childConfig = child.rexSizer;
        padding = childConfig.padding;

        PreLayoutChild.call(this, child);

        // Set size
        if (child.isRexSizer) {
            child.runLayout(
                this,
                this.getExpandedChildWidth(child),
                this.getExpandedChildHeight(child)
            );
            CheckSize(child, this);
        } else {
            childWidth = undefined;
            childHeight = undefined;
            if (childConfig.expandWidth) { // Expand width
                childWidth = innerWidth - padding.left - padding.right;
            }
            if (childConfig.expandHeight) { // Expand height
                childHeight = innerHeight - padding.top - padding.bottom;
            }
            ResizeGameObject(child, childWidth, childHeight);
        }

        // Set position
        x = (startX + padding.left);
        width = innerWidth - padding.left - padding.right;
        y = (startY + padding.top);
        height = innerHeight - padding.top - padding.bottom;

        LayoutChild.call(this,
            child, x, y, width, height, childConfig.align,
            childConfig.alignOffsetX, childConfig.alignOffsetY
        );
    }
}

export default LayoutChildren;
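
The layout pass above reduces to padding arithmetic: every visible child of the overlap sizer is sized and aligned inside the same inner rectangle minus that child's own padding. Restated in Python purely as an illustration of that arithmetic (not part of the plugin):

```python
# Illustration only: the per-child align-zone computation from LayoutChildren.
def align_zone(inner_left, inner_top, inner_width, inner_height, padding):
    """Return (x, y, width, height) of the zone a child is aligned inside."""
    x = inner_left + padding["left"]
    y = inner_top + padding["top"]
    width = inner_width - padding["left"] - padding["right"]
    height = inner_height - padding["top"] - padding["bottom"]
    return x, y, width, height

# Example: a 200x100 inner area with 10px padding on every side.
print(align_zone(0, 0, 200, 100, dict(left=10, right=10, top=10, bottom=10)))
# -> (10, 10, 180, 80)
```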
spaces/AlexWang/lama/bin/paper_runfiles/generate_test_celeba-hq.sh
DELETED
@@ -1,17 +0,0 @@
#!/usr/bin/env bash

# paths to data are valid for mml-ws01
OUT_DIR="/media/inpainting/paper_data/CelebA-HQ_val_test"

source "$(dirname $0)/env.sh"

for datadir in "val" "test"
do
    for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512
    do
        "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-celeba-hq \
            location.out_dir=$OUT_DIR cropping.out_square_crop=False

        "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
    done
done
spaces/AlgoveraAI/web3-wallet/app.py
DELETED
@@ -1,106 +0,0 @@
import gradio as gr
from ocean_lib.config import Config
from ocean_lib.models.btoken import BToken  # BToken is ERC20
from ocean_lib.ocean.ocean import Ocean
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.currency import from_wei  # wei is the smallest denomination of ether, e.g. like cents
# from ocean_lib.web3_internal.currency import pretty_ether_and_wei
from wallet import get_wallet

config = Config('config.ini')
ocean = Ocean(config)

def wallet(private_key):

    if private_key:
        mnemonic = None
    else:
        account, mnemonic = get_wallet()
        private_key = account.key.hex()

    wallet = Wallet(ocean.web3, private_key, transaction_timeout=20, block_confirmations=config.block_confirmations)
    address = wallet.address

    OCEAN_token = BToken(ocean.web3, ocean.OCEAN_address)

    eth_balance = from_wei(ocean.web3.eth.get_balance(address))
    ocean_balance = from_wei(OCEAN_token.balanceOf(address))

    return address, private_key, mnemonic, eth_balance, ocean_balance

# def wallet(private_key, did):
#     wallet = Wallet(ocean.web3, private_key, transaction_timeout=20, block_confirmations=config.block_confirmations)
#     address = wallet.address
#     OCEAN_token = BToken(ocean.web3, ocean.OCEAN_address)

#     eth_balance = from_wei(ocean.web3.eth.get_balance(wallet.address))
#     ocean_balance = from_wei(OCEAN_token.balanceOf(wallet.address))

#     asset = ocean.assets.resolve(did)

#     ALG_ddo = ocean.assets.resolve(did)
#     alg_token = ocean.get_data_token(ALG_ddo.data_token_address)

#     alg_token_balance = pretty_ether_and_wei(alg_token.balanceOf(wallet.address))

#     return address, eth_balance, ocean_balance, alg_token_balance

description = (
    "This demo shows the balance of tokens in your Web3 wallet. If you do not have a Web3 wallet, leave the input field empty when running and the app will create a wallet for you. "
    "A wallet consists of a public and private key. You can think of the public key like your email address and the private key like your password. "
    "The public key can be easily determined from the private key, but not vice versa. "
    "The private key is output in the form of both a hexadecimal number and the corresponding mnemonic phrase, which is easier to remember. "
    "If you want to continue to use the same wallet in future, you should store the private key (and/or the mnemonic phrase, which can be used to recover the private key). "
    "Then enter the private key into the input field when running the app. "
    "Do not give your private key to anyone ever. In fact, it is bad practice to store your private key on your PC for wallets that contain tokens with real value. "
    "However, we are using test tokens on the Ethereum test network (Rinkeby) where the tokens have no real value. "
    "Initially, your wallet should have no ETH and OCEAN tokens in it. You can then request ETH and OCEAN test tokens by entering your public address into faucets (follow the links at the bottom of the page). "
    "Then wait about 15 seconds and re-run the app for the same private key. "
    "This demo uses the Ocean Protocol Python library in the backend. For more information on the advantages of combining Ocean and HuggingFace, check out the blog post link below. "
    ""
)

# description = (
#     "This demo shows the balance of algorithm tokens, as well as ETH and OCEAN, in your Web3 wallet (for a given private key). The algorithm tokens will be used to run Algovera apps on HF spaces in future. "
#     "Currently, you need to export your private key from a MetaMask wallet (we plan to randomly generate a private key in the app and bypass MetaMask in future). "
#     "For a guide on how to install MetaMask (an extension in your browser), check the link at the bottom of the page. "
#     "We highly recommend doing this with a wallet that has no real tokens in it. We use a test network (Rinkeby) where the tokens have no real value. "
#     "After an initial setup, your wallet should have no tokens. You can request ETH and OCEAN test tokens from faucets at the links at the bottom of the page. "
#     "To buy an algorithm token (using the OCEAN and ETH), you can search for algorithms on the Ocean marketplace (see link at bottom). Make sure to use algorithms that are on the Rinkeby test network (you need to select Rinkeby from the dropdown menu). "
#     "We have provided a link to our DCGAN model on the test network at the bottom. If you can't see it you are not on the test network. "
#     "After you buy an algorithm token, you need to locate the DID in the metadata on the marketplace. Then enter it into the input textbox. "
#     "Later we will add HF Spaces apps to search algorithms and buy algorithm tokens, which you can use to run demos of the algorithms. "
#     "This demo uses the Ocean Python library in the backend (see link below)."
# )

article = (
    "<p style='text-align: center'>"
    "<a href='https://faucet.rinkeby.io/' target='_blank'>1. ETH faucet</a> | "
    "<a href='https://faucet.rinkeby.oceanprotocol.com/' target='_blank'>2. OCEAN faucet | </a>"
    "<a href='https://docs.algovera.ai/blog/2022/01/04/Using%20the%20Ocean%20Marketplace%20with%20HuggingFace%20Apps,%20Algorithms%20and%20Datasets' target='_blank'>3. Blog about Ocean Protocol on HuggingFace</a> "
    "</p>"
)


interface = gr.Interface(
    wallet,
    [
        gr.inputs.Textbox(label="Private Key"),
    ],
    [
        #gr.outputs.Textbox(label="Public Key"),
        #gr.outputs.Textbox(label="Algorithm token balance"),
        gr.outputs.Textbox(label="Public Address"),
        gr.outputs.Textbox(label="Private Key"),
        gr.outputs.Textbox(label="Recovery Passphrase"),
        gr.outputs.Textbox(label="ETH balance"),
        gr.outputs.Textbox(label="OCEAN balance"),
    ],
    title="Web3 Wallet",
    description=description,
    article=article,
    theme="huggingface",
)

interface.launch()
spaces/Aloento/9Nine-PITS/yin.py
DELETED
@@ -1,165 +0,0 @@
# remove np from https://github.com/dhchoi99/NANSY/blob/master/models/yin.py
# adapted from https://github.com/patriceguyot/Yin
# https://github.com/NVIDIA/mellotron/blob/master/yin.py

import numpy as np  # still required by differenceFunction_np and the __main__ check
import torch
import torch.nn.functional as F


def differenceFunction(x, N, tau_max):
    """
    Compute difference function of data x. This corresponds to equation (6) in [1]
    This solution is implemented directly with torch rfft.


    :param x: audio data (Tensor)
    :param N: length of data
    :param tau_max: integration window size
    :return: difference function
    :rtype: list
    """

    # x = np.array(x, np.float64) #[B,T]
    assert x.dim() == 2
    b, w = x.shape
    if w < tau_max:
        x = F.pad(x, (tau_max - w - (tau_max - w) // 2, (tau_max - w) // 2),
                  mode='reflect')
        w = tau_max
    # x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
    x_cumsum = torch.cat(
        [torch.zeros([b, 1], device=x.device), (x * x).cumsum(dim=1)], dim=1)
    size = w + tau_max
    p2 = (size // 32).bit_length()
    # p2 = ceil(log2(size+1 // 32))
    nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
    size_pad = min(n * 2 ** p2 for n in nice_numbers if n * 2 ** p2 >= size)
    fc = torch.fft.rfft(x, size_pad)  # [B,F]
    conv = torch.fft.irfft(fc * fc.conj())[:, :tau_max]
    return x_cumsum[:, w:w - tau_max:
                    -1] + x_cumsum[:, w] - x_cumsum[:, :tau_max] - 2 * conv


def differenceFunction_np(x, N, tau_max):
    """
    Compute difference function of data x. This corresponds to equation (6) in [1]
    This solution is implemented directly with Numpy fft.


    :param x: audio data
    :param N: length of data
    :param tau_max: integration window size
    :return: difference function
    :rtype: list
    """

    x = np.array(x, np.float64)
    w = x.size
    tau_max = min(tau_max, w)
    x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
    size = w + tau_max
    p2 = (size // 32).bit_length()
    nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
    size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)
    fc = np.fft.rfft(x, size_pad)
    conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]
    return x_cumsum[w:w -
                    tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - 2 * conv


def cumulativeMeanNormalizedDifferenceFunction(df, N, eps=1e-8):
    """
    Compute cumulative mean normalized difference function (CMND).

    This corresponds to equation (8) in [1]

    :param df: Difference function
    :param N: length of data
    :return: cumulative mean normalized difference function
    :rtype: list
    """
    # np.seterr(divide='ignore', invalid='ignore')
    # scipy method, assert df>0 for all element
    # cmndf = df[1:] * np.asarray(list(range(1, N))) / (np.cumsum(df[1:]).astype(float) + eps)
    B, _ = df.shape
    cmndf = df[:, 1:] * torch.arange(1, N, device=df.device, dtype=df.dtype).view(
        1, -1) / (df[:, 1:].cumsum(dim=-1) + eps)
    return torch.cat(
        [torch.ones([B, 1], device=df.device, dtype=df.dtype), cmndf], dim=-1)


def differenceFunctionTorch(xs: torch.Tensor, N, tau_max) -> torch.Tensor:
    """pytorch backend batch-wise differenceFunction
    has 1e-4 level error with input shape of (32, 22050*1.5)
    Args:
        xs:
        N:
        tau_max:

    Returns:

    """
    xs = xs.double()
    w = xs.shape[-1]
    tau_max = min(tau_max, w)
    zeros = torch.zeros((xs.shape[0], 1))  # (unused)
    x_cumsum = torch.cat((torch.zeros((xs.shape[0], 1), device=xs.device),
                          (xs * xs).cumsum(dim=-1, dtype=torch.double)),
                         dim=-1)  # B x w
    size = w + tau_max
    p2 = (size // 32).bit_length()
    nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
    size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)

    fcs = torch.fft.rfft(xs, n=size_pad, dim=-1)
    convs = torch.fft.irfft(fcs * fcs.conj())[:, :tau_max]
    y1 = torch.flip(x_cumsum[:, w - tau_max + 1:w + 1], dims=[-1])
    y = y1 + x_cumsum[:, w].unsqueeze(-1) - x_cumsum[:, :tau_max] - 2 * convs
    return y


def cumulativeMeanNormalizedDifferenceFunctionTorch(dfs: torch.Tensor,
                                                    N,
                                                    eps=1e-8) -> torch.Tensor:
    arange = torch.arange(1, N, device=dfs.device, dtype=torch.float64)
    cumsum = torch.cumsum(dfs[:, 1:], dim=-1,
                          dtype=torch.float64).to(dfs.device)

    cmndfs = dfs[:, 1:] * arange / (cumsum + eps)
    cmndfs = torch.cat(
        (torch.ones(cmndfs.shape[0], 1, device=dfs.device), cmndfs), dim=-1)
    return cmndfs


if __name__ == '__main__':
    wav = torch.randn(32, int(22050 * 1.5)).cuda()
    wav_numpy = wav.detach().cpu().numpy()
    x = wav_numpy[0]

    w_len = 2048
    w_step = 256
    tau_max = 2048
    W = 2048

    startFrames = list(range(0, x.shape[-1] - w_len, w_step))
    startFrames = np.asarray(startFrames)
    # times = startFrames / sr
    frames = [x[..., t:t + W] for t in startFrames]
    frames = np.asarray(frames)
    frames_torch = torch.from_numpy(frames).cuda()

    cmndfs0 = []
    for idx, frame in enumerate(frames):
        df = differenceFunction(frame, frame.shape[-1], tau_max)
        cmndf = cumulativeMeanNormalizedDifferenceFunction(df, tau_max)
        cmndfs0.append(cmndf)
    cmndfs0 = np.asarray(cmndfs0)

    dfs = differenceFunctionTorch(frames_torch, frames_torch.shape[-1],
                                  tau_max)
    cmndfs1 = cumulativeMeanNormalizedDifferenceFunctionTorch(
        dfs, tau_max).detach().cpu().numpy()
    print(cmndfs0.shape, cmndfs1.shape)
    print(np.sum(np.abs(cmndfs0 - cmndfs1)))
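
For reference, the equations of the YIN paper (de Cheveigné and Kawahara, 2002) that the docstrings above cite as [1]: the difference function (6), its autocorrelation expansion (7), which is what the cumulative-sum-plus-FFT code computes, and the cumulative mean normalized difference (8):

```latex
% Difference function, eq. (6):
d_t(\tau) = \sum_{j=1}^{W} \left( x_j - x_{j+\tau} \right)^2

% Expansion in terms of energy terms and the autocorrelation r_t(\tau), eq. (7):
d_t(\tau) = r_t(0) + r_{t+\tau}(0) - 2\, r_t(\tau)

% Cumulative mean normalized difference function, eq. (8):
d'_t(\tau) =
\begin{cases}
  1 & \text{if } \tau = 0, \\[2pt]
  d_t(\tau) \left/ \left[ \dfrac{1}{\tau} \sum_{j=1}^{\tau} d_t(j) \right] \right. & \text{otherwise.}
\end{cases}
```

In `differenceFunction`, the `conv` term is the autocorrelation of (7) computed via `rfft`/`irfft`, and the two cumulative-sum slices supply the energy terms; padding the FFT to a "nice" size of the form $n \cdot 2^{p}$ just keeps the transform fast.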
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/__init__.py
DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/inpaint.md
DELETED
@@ -1,75 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided image inpainting

[[open-in-colab]]

The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image by providing a mask and a text prompt. It uses a version of Stable Diffusion trained specifically for the inpainting task, such as [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting).

Start by loading a [`StableDiffusionInpaintPipeline`] instance:

```python
import PIL
import requests
import torch
from io import BytesIO

from diffusers import StableDiffusionInpaintPipeline

pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
)
pipeline = pipeline.to("cuda")
```

Download the image of a dog and the mask of the region that will be replaced:

```python
def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
```

Now you can create a prompt describing what should replace the masked region:

```python
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```

`image` | `mask_image` | `prompt` | output |
:-------------------------:|:-------------------------:|:-------------------------:|-------------------------:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |

<Tip warning={true}>

An earlier, experimental inpainting implementation used a different, lower-quality process. For backwards compatibility, loading a pretrained pipeline that does not include the new model will still apply the old inpainting method.

</Tip>

Try image inpainting yourself in the Space below!

<iframe
	src="https://runwayml-stable-diffusion-inpainting.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py
DELETED
@@ -1,265 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        # casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],  # has to be different from "sample" for correct tracing
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
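
Per the `argparse` definition above, the script is driven by `--model_path` and `--output_path`, with optional `--opset` and `--fp16` flags; equivalently, `convert_models` can be called directly from Python. A minimal usage sketch (the model id and output directory here are only examples, and the import assumes the script is on your module path):

```python
# Equivalent to:
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
from convert_stable_diffusion_checkpoint_to_onnx import convert_models

convert_models(
    model_path="runwayml/stable-diffusion-v1-5",  # Hub id or local diffusers directory
    output_path="./sd_onnx",
    opset=14,       # the argparse default above
    fp16=False,     # float16 export requires a CUDA device, per convert_models
)
```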
spaces/Andy1621/uniformer_image_detection/configs/pafpn/README.md
DELETED
@@ -1,26 +0,0 @@
# Path Aggregation Network for Instance Segmentation

## Introduction

[ALGORITHM]

```
@inproceedings{liu2018path,
  author = {Shu Liu and
            Lu Qi and
            Haifang Qin and
            Jianping Shi and
            Jiaya Jia},
  title = {Path Aggregation Network for Instance Segmentation},
  booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2018}
}
```

## Results and Models

| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|:-------------:|:----------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) |
spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py
DELETED
@@ -1,95 +0,0 @@
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
    type='SparseRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        add_extra_convs='on_input',
        num_outs=4),
    rpn_head=dict(
        type='EmbeddingRPNHead',
        num_proposals=num_proposals,
        proposal_feature_channel=256),
    roi_head=dict(
        type='SparseRoIHead',
        num_stages=num_stages,
        stage_loss_weights=[1] * num_stages,
        proposal_feature_channel=256,
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='DIIHead',
                num_classes=80,
                num_ffn_fcs=2,
                num_heads=8,
                num_cls_fcs=1,
                num_reg_fcs=3,
                feedforward_channels=2048,
                in_channels=256,
                dropout=0.0,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                dynamic_conv_cfg=dict(
                    type='DynamicConv',
                    in_channels=256,
                    feat_channels=64,
                    out_channels=256,
                    input_feat_shape=7,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN')),
                loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                loss_cls=dict(
                    type='FocalLoss',
                    use_sigmoid=True,
                    gamma=2.0,
                    alpha=0.25,
                    loss_weight=2.0),
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    clip_border=False,
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
        ]),
    # training and testing settings
    train_cfg=dict(
        rpn=None,
        rcnn=[
            dict(
                assigner=dict(
                    type='HungarianAssigner',
                    cls_cost=dict(type='FocalLossCost', weight=2.0),
                    reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                    iou_cost=dict(type='IoUCost', iou_mode='giou',
                                  weight=2.0)),
                sampler=dict(type='PseudoSampler'),
                pos_weight=1) for _ in range(num_stages)
        ]),
    test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))

# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
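
This file is an MMDetection-style config: plain Python evaluated into nested dicts, with the `_base_` files merged in first and `_delete_=True` wiping an inherited key (here, the base schedule's optimizer) before overriding it. A sketch of how such a file is typically consumed with the mmcv/mmdet 2.x-era APIs (the path is illustrative):

```python
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile("configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py")
print(cfg.model.type)                      # 'SparseRCNN'
print(cfg.model.rpn_head.num_proposals)    # 100, resolved from the module-level variable

# train_cfg/test_cfg live inside cfg.model in this new-style config,
# so they are picked up automatically when the detector is built.
model = build_detector(cfg.model)
```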
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './encnet_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/README.md
DELETED
@@ -1,420 +0,0 @@
|
|
1 |
-
**Breaking change: WebUI now uses PyTorch 2.1.**
|
2 |
-
|
3 |
-
* For one-click installer users: If you encounter problems after updating, rerun the update script. If issues persist, delete the `installer_files` folder and use the start script to reinstall requirements.
|
4 |
-
* For manual installations, update PyTorch with the [provided command](https://github.com/oobabooga/text-generation-webui/#2-install-pytorch).
|
5 |
-
|
6 |
-
# Text generation web UI
|
7 |
-
|
8 |
-
A Gradio web UI for Large Language Models.
|
9 |
-
|
10 |
-
Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) of text generation.
|
11 |
-
|
12 |
-
| |  |
|
13 |
-
|:---:|:---:|
|
14 |
-
| |  |
|
15 |
-
|
16 |
-
## Features
|
17 |
-
|
18 |
-
* 3 interface modes: default (two columns), notebook, and chat
|
19 |
-
* Multiple model backends: [transformers](https://github.com/huggingface/transformers), [llama.cpp](https://github.com/ggerganov/llama.cpp), [ExLlama](https://github.com/turboderp/exllama), [ExLlamaV2](https://github.com/turboderp/exllamav2), [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa), [CTransformers](https://github.com/marella/ctransformers), [AutoAWQ](https://github.com/casper-hansen/AutoAWQ)
|
20 |
-
* Dropdown menu for quickly switching between different models
|
21 |
-
* LoRA: load and unload LoRAs on the fly, train a new LoRA using QLoRA
|
22 |
-
* Precise instruction templates for chat mode, including Llama-2-chat, Alpaca, Vicuna, WizardLM, StableLM, and many others
|
23 |
-
* 4-bit, 8-bit, and CPU inference through the transformers library
|
24 |
-
* Use llama.cpp models with transformers samplers (`llamacpp_HF` loader)
|
25 |
-
* [Multimodal pipelines, including LLaVA and MiniGPT-4](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal)
|
26 |
-
* [Extensions framework](docs/Extensions.md)
|
27 |
-
* [Custom chat characters](docs/Chat-mode.md)
|
28 |
-
* Very efficient text streaming
|
29 |
-
* Markdown output with LaTeX rendering, to use for instance with [GALACTICA](https://github.com/paperswithcode/galai)
|
30 |
-
* API, including endpoints for websocket streaming ([see the examples](https://github.com/oobabooga/text-generation-webui/blob/main/api-examples))
|
31 |
-
|
32 |
-
To learn how to use the various features, check out the Documentation: https://github.com/oobabooga/text-generation-webui/tree/main/docs
|
33 |
-
|
34 |
-
## Installation
|
35 |
-
|
36 |
-
### One-click installers
|
37 |
-
|
38 |
-
1) Clone or download the repository.
|
39 |
-
2) Run the `start_linux.sh`, `start_windows.bat`, `start_macos.sh`, or `start_wsl.bat` script depending on your OS.
|
40 |
-
3) Select your GPU vendor when asked.
|
41 |
-
4) Have fun!
|
42 |
-
|
43 |
-
#### How it works
|
44 |
-
|
45 |
-
The script creates a folder called `installer_files` where it sets up a Conda environment using Miniconda. The installation is self-contained: if you want to reinstall, just delete `installer_files` and run the start script again.
|
46 |
-
|
47 |
-
To launch the webui in the future after it is already installed, run the same `start` script.
|
48 |
-
|
49 |
-
#### Getting updates
|
50 |
-
|
51 |
-
Run `update_linux.sh`, `update_windows.bat`, `update_macos.sh`, or `update_wsl.bat`.
|
52 |
-
|
53 |
-
#### Running commands
|
54 |
-
|
55 |
-
If you ever need to install something manually in the `installer_files` environment, you can launch an interactive shell using the cmd script: `cmd_linux.sh`, `cmd_windows.bat`, `cmd_macos.sh`, or `cmd_wsl.bat`.
|
56 |
-
|
57 |
-
#### Defining command-line flags
|
58 |
-
|
59 |
-
To define persistent command-line flags like `--listen` or `--api`, edit the `CMD_FLAGS.txt` file with a text editor and add them there. Flags can also be provided directly to the start scripts, for instance, `./start-linux.sh --listen`.
|
60 |
-
|
61 |
-

#### Other info

* There is no need to run any of those scripts as admin/root.
* For additional instructions about AMD setup, WSL setup, and nvcc installation, consult [this page](https://github.com/oobabooga/text-generation-webui/blob/main/docs/One-Click-Installers.md).
* The installer has been tested mostly on NVIDIA GPUs. If you can find a way to improve it for your AMD/Intel Arc/Mac Metal GPU, you are highly encouraged to submit a PR to this repository. The main file to be edited is `one_click.py`.
* For automated installation, you can use the `GPU_CHOICE`, `LAUNCH_AFTER_INSTALL`, and `INSTALL_EXTENSIONS` environment variables. For instance: `GPU_CHOICE=A LAUNCH_AFTER_INSTALL=False INSTALL_EXTENSIONS=False ./start_linux.sh`.

### Manual installation using Conda

Recommended if you have some experience with the command line.

#### 0. Install Conda

https://docs.conda.io/en/latest/miniconda.html

On Linux or WSL, it can be automatically installed with these two commands ([source](https://educe-ubc.github.io/conda.html)):

```
curl -sL "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh" > "Miniconda3.sh"
bash Miniconda3.sh
```

#### 1. Create a new conda environment

```
conda create -n textgen python=3.10
conda activate textgen
```

#### 2. Install PyTorch

| System | GPU | Command |
|--------|---------|---------|
| Linux/WSL | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118` |
| Linux/WSL | CPU only | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu` |
| Linux | AMD | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6` |
| MacOS + MPS | Any | `pip3 install torch torchvision torchaudio` |
| Windows | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118` |
| Windows | CPU only | `pip3 install torch torchvision torchaudio` |

The up-to-date commands can be found here: https://pytorch.org/get-started/locally/.

#### 3. Install the web UI

```
git clone https://github.com/oobabooga/text-generation-webui
cd text-generation-webui
pip install -r requirements.txt
```

#### AMD, Metal, Intel Arc, and CPUs without AVX2

1) Replace the last command above with

```
pip install -r requirements_nowheels.txt
```

2) Manually install llama-cpp-python using the appropriate command for your hardware: [Installation from PyPI](https://github.com/abetlen/llama-cpp-python#installation-from-pypi).
3) Do the same for CTransformers: [Installation](https://github.com/marella/ctransformers#installation).
4) AMD: Manually install AutoGPTQ: [Installation](https://github.com/PanQiWei/AutoGPTQ#installation).
5) AMD: Manually install [ExLlama](https://github.com/turboderp/exllama) by simply cloning it into the `repositories` folder (it will be automatically compiled at runtime after that):

```
cd text-generation-webui
git clone https://github.com/turboderp/exllama repositories/exllama
```

#### bitsandbytes on older NVIDIA GPUs

bitsandbytes >= 0.39 may not work. In that case, to use `--load-in-8bit`, you may have to downgrade like this:

* Linux: `pip install bitsandbytes==0.38.1`
* Windows: `pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl`

### Alternative: Docker

```
ln -s docker/{Dockerfile,docker-compose.yml,.dockerignore} .
cp docker/.env.example .env
# Edit .env and set TORCH_CUDA_ARCH_LIST based on your GPU model
docker compose up --build
```

* You need to have docker compose v2.17 or higher installed. See [this guide](https://github.com/oobabooga/text-generation-webui/blob/main/docs/Docker.md) for instructions.
* For additional docker files, check out [this repository](https://github.com/Atinoda/text-generation-webui-docker).

### Updating the requirements

From time to time, the `requirements.txt` changes. To update, use these commands:

```
conda activate textgen
cd text-generation-webui
pip install -r requirements.txt --upgrade
```

## Downloading models

Models should be placed in the `text-generation-webui/models` folder. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).

* Transformers or GPTQ models are made of several files and must be placed in a subfolder. Example:

```
text-generation-webui
├── models
│   ├── lmsys_vicuna-33b-v1.3
│   │   ├── config.json
│   │   ├── generation_config.json
│   │   ├── pytorch_model-00001-of-00007.bin
│   │   ├── pytorch_model-00002-of-00007.bin
│   │   ├── pytorch_model-00003-of-00007.bin
│   │   ├── pytorch_model-00004-of-00007.bin
│   │   ├── pytorch_model-00005-of-00007.bin
│   │   ├── pytorch_model-00006-of-00007.bin
│   │   ├── pytorch_model-00007-of-00007.bin
│   │   ├── pytorch_model.bin.index.json
│   │   ├── special_tokens_map.json
│   │   ├── tokenizer_config.json
│   │   └── tokenizer.model
```

* GGUF models are a single file and should be placed directly into `models`. Example:

```
text-generation-webui
├── models
│   ├── llama-2-13b-chat.Q4_K_M.gguf
```

In both cases, you can use the "Model" tab of the UI to download the model from Hugging Face automatically. It is also possible to download via the command line with `python download-model.py organization/model` (use `--help` to see all the options).
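
For example, a command-line download might look like this (the model name is illustrative):

```
python download-model.py facebook/opt-1.3b
```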

#### GPT-4chan

<details>
<summary>
Instructions
</summary>

[GPT-4chan](https://huggingface.co/ykilcher/gpt-4chan) has been shut down from Hugging Face, so you need to download it elsewhere. You have two options:

* Torrent: [16-bit](https://archive.org/details/gpt4chan_model_float16) / [32-bit](https://archive.org/details/gpt4chan_model)
* Direct download: [16-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model_float16/) / [32-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model/)

The 32-bit version is only relevant if you intend to run the model in CPU mode. Otherwise, you should use the 16-bit version.

After downloading the model, follow these steps:

1. Place the files under `models/gpt4chan_model_float16` or `models/gpt4chan_model`.
2. Place GPT-J 6B's config.json file in that same folder: [config.json](https://huggingface.co/EleutherAI/gpt-j-6B/raw/main/config.json).
3. Download GPT-J 6B's tokenizer files (they will be automatically detected when you attempt to load GPT-4chan):

```
python download-model.py EleutherAI/gpt-j-6B --text-only
```

When you load this model in default or notebook modes, the "HTML" tab will show the generated text in 4chan format:

![Image3](https://github.com/oobabooga/screenshots/raw/main/gpt4chan.png)

</details>

## Starting the web UI

```
conda activate textgen
cd text-generation-webui
python server.py
```

Then browse to

`http://localhost:7860/?__theme=dark`

Optionally, you can use the following command-line flags:

#### Basic settings

| Flag | Description |
|--------------------------------------------|-------------|
| `-h`, `--help` | Show this help message and exit. |
| `--multi-user` | Multi-user mode. Chat histories are not saved or automatically loaded. WARNING: this is highly experimental. |
| `--character CHARACTER` | The name of the character to load in chat mode by default. |
| `--model MODEL` | Name of the model to load by default. |
| `--lora LORA [LORA ...]` | The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces. |
| `--model-dir MODEL_DIR` | Path to directory with all the models. |
| `--lora-dir LORA_DIR` | Path to directory with all the loras. |
| `--model-menu` | Show a model menu in the terminal when the web UI is first launched. |
| `--settings SETTINGS_FILE` | Load the default interface settings from this yaml file. See `settings-template.yaml` for an example. If you create a file called `settings.yaml`, this file will be loaded by default without the need to use the `--settings` flag. |
| `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
| `--verbose` | Print the prompts to the terminal. |
| `--chat-buttons` | Show buttons on the chat tab instead of a hover menu. |

#### Model loader

| Flag | Description |
|--------------------------------------------|-------------|
| `--loader LOADER` | Choose the model loader manually; otherwise, it will be autodetected. Valid options: transformers, autogptq, gptq-for-llama, exllama, exllama_hf, llamacpp, rwkv, ctransformers |

#### Accelerate/transformers

| Flag | Description |
|---------------------------------------------|-------------|
| `--cpu` | Use the CPU to generate text. Warning: Training on CPU is extremely slow. |
| `--auto-devices` | Automatically split the model across the available GPU(s) and CPU. |
| `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maximum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs. You can also set values in MiB like `--gpu-memory 3500MiB`. |
| `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Same as above. |
| `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
| `--disk-cache-dir DISK_CACHE_DIR` | Directory to save the disk cache to. Defaults to `cache/`. |
| `--load-in-8bit` | Load the model with 8-bit precision (using bitsandbytes). |
| `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
| `--no-cache` | Set `use_cache` to False while generating text. This reduces VRAM usage a bit at a performance cost. |
| `--xformers` | Use xformers' memory-efficient attention. This should increase your tokens/s. |
| `--sdp-attention` | Use torch 2.0's sdp attention. |
| `--trust-remote-code` | Set trust_remote_code=True while loading a model. Necessary for ChatGLM and Falcon. |
| `--use_fast` | Set use_fast=True while loading a tokenizer. |

#### Accelerate 4-bit

⚠️ Requires a minimum compute capability of 7.0 on Windows at the moment.

| Flag | Description |
|---------------------------------------------|-------------|
| `--load-in-4bit` | Load the model with 4-bit precision (using bitsandbytes). |
| `--compute_dtype COMPUTE_DTYPE` | Compute dtype for 4-bit. Valid options: bfloat16, float16, float32. |
| `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |
| `--use_double_quant` | use_double_quant for 4-bit. |
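
As a sketch combining the flags above, loading a Transformers model in 4-bit might look like this (the model folder name is the example from the Downloading models section; the dtype and quant_type values are illustrative):

```
python server.py --model lmsys_vicuna-33b-v1.3 --load-in-4bit --compute_dtype bfloat16 --quant_type nf4
```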

#### GGUF (for llama.cpp and ctransformers)

| Flag | Description |
|-------------|-------------|
| `--threads` | Number of threads to use. |
| `--threads-batch THREADS_BATCH` | Number of threads to use for batch/prompt processing. |
| `--n_batch` | Maximum number of prompt tokens to batch together when calling llama_eval. |
| `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
| `--n_ctx N_CTX` | Size of the prompt context. |

#### llama.cpp

| Flag | Description |
|---------------|---------------|
| `--mul_mat_q` | Activate new mulmat kernels. |
| `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs. Comma-separated list of proportions, e.g. 18,17. |
| `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
| `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
| `--cfg-cache` | llamacpp_HF: Create an additional cache for CFG negative prompts. |
| `--no-mmap` | Prevent mmap from being used. |
| `--mlock` | Force the system to keep the model in RAM. |
| `--numa` | Activate NUMA task allocation for llama.cpp. |
| `--cpu` | Use the CPU version of llama-cpp-python instead of the GPU-accelerated version. |
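
Putting some of these flags together, a hypothetical llama.cpp launch with partial GPU offloading might look like this (the GGUF filename is the example from the Downloading models section; the layer and thread counts are illustrative):

```
python server.py --model llama-2-13b-chat.Q4_K_M.gguf --n-gpu-layers 35 --threads 8
```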

#### ctransformers

| Flag | Description |
|-------------|-------------|
| `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently gpt2, gptj, gptneox, falcon, llama, mpt, starcoder (gptbigcode), dollyv2, and replit are supported. |

#### AutoGPTQ

| Flag | Description |
|------------------|-------------|
| `--triton` | Use triton. |
| `--no_inject_fused_attention` | Disable the use of fused attention, which will use less VRAM at the cost of slower inference. |
| `--no_inject_fused_mlp` | Triton mode only: disable the use of fused MLP, which will use less VRAM at the cost of slower inference. |
| `--no_use_cuda_fp16` | This can make models faster on some systems. |
| `--desc_act` | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. |
| `--disable_exllama` | Disable the ExLlama kernel, which can improve inference speed on some systems. |

#### ExLlama

| Flag | Description |
|------------------|-------------|
| `--gpu-split` | Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. `20,7,7` |
| `--max_seq_len MAX_SEQ_LEN` | Maximum sequence length. |
| `--cfg-cache` | ExLlama_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader, but not necessary for CFG with base ExLlama. |

#### GPTQ-for-LLaMa

| Flag | Description |
|---------------------------|-------------|
| `--wbits WBITS` | Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported. |
| `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
| `--groupsize GROUPSIZE` | Group size. |
| `--pre_layer PRE_LAYER [PRE_LAYER ...]` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-GPU setups, write the numbers separated by spaces, e.g. `--pre_layer 30 60`. |
| `--checkpoint CHECKPOINT` | The path to the quantized checkpoint file. If not specified, it will be automatically detected. |
| `--monkey-patch` | Apply the monkey patch for using LoRAs with quantized models. |

#### DeepSpeed

| Flag | Description |
|---------------------------------------|-------------|
| `--deepspeed` | Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration. |
| `--nvme-offload-dir NVME_OFFLOAD_DIR` | DeepSpeed: Directory to use for ZeRO-3 NVME offloading. |
| `--local_rank LOCAL_RANK` | DeepSpeed: Optional argument for distributed setups. |
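
As a sketch, assuming the standard `deepspeed` launcher that ships with the DeepSpeed package, a single-GPU run might be started like this:

```
deepspeed --num_gpus=1 server.py --deepspeed
```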

#### RWKV

| Flag | Description |
|---------------------------------|-------------|
| `--rwkv-strategy RWKV_STRATEGY` | RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8". |
| `--rwkv-cuda-on` | RWKV: Compile the CUDA kernel for better performance. |

#### RoPE (for llama.cpp, ExLlama, ExLlamaV2, and transformers)

| Flag | Description |
|------------------|-------------|
| `--alpha_value ALPHA_VALUE` | Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both. |
| `--rope_freq_base ROPE_FREQ_BASE` | If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63). |
| `--compress_pos_emb COMPRESS_POS_EMB` | Positional embeddings compression factor. Should be set to (context length) / (model's original context length). Equal to 1/rope_freq_scale. |
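
A minimal Python sketch of the relationships stated above, with illustrative values:

```
# rope_freq_base and alpha_value are related by:
#   rope_freq_base = 10000 * alpha_value ** (64 / 63)
alpha_value = 2.0
rope_freq_base = 10000 * alpha_value ** (64 / 63)
print(round(rope_freq_base))  # ~20221

# compress_pos_emb should be (target context) / (model's original context),
# i.e. the inverse of rope_freq_scale:
compress_pos_emb = 8192 / 4096  # 2.0 when stretching a 4k model to 8k
```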

#### Gradio

| Flag | Description |
|---------------------------------------|-------------|
| `--listen` | Make the web UI reachable from your local network. |
| `--listen-host LISTEN_HOST` | The hostname that the server will use. |
| `--listen-port LISTEN_PORT` | The listening port that the server will use. |
| `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
| `--auto-launch` | Open the web UI in the default browser upon launch. |
| `--gradio-auth USER:PWD` | Set Gradio authentication like "username:password"; or comma-delimit multiple credentials like "u1:p1,u2:p2,u3:p3". |
| `--gradio-auth-path GRADIO_AUTH_PATH` | Set the Gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3". |
| `--ssl-keyfile SSL_KEYFILE` | The path to the SSL certificate key file. |
| `--ssl-certfile SSL_CERTFILE` | The path to the SSL certificate cert file. |

#### API

| Flag | Description |
|---------------------------------------|-------------|
| `--api` | Enable the API extension. |
| `--public-api` | Create a public URL for the API using Cloudflare. |
| `--public-api-id PUBLIC_API_ID` | Tunnel ID for a named Cloudflare Tunnel. Use together with the public-api option. |
| `--api-blocking-port BLOCKING_PORT` | The listening port for the blocking API. |
| `--api-streaming-port STREAMING_PORT` | The listening port for the streaming API. |
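
For example, enabling the API on custom ports (the port numbers are illustrative):

```
python server.py --api --api-blocking-port 5000 --api-streaming-port 5005
```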

#### Multimodal

| Flag | Description |
|---------------------------------------|-------------|
| `--multimodal-pipeline PIPELINE` | The multimodal pipeline to use. Examples: `llava-7b`, `llava-13b`. |

## Presets

Inference settings presets can be created under `presets/` as yaml files. These files are detected automatically at startup.

The presets that are included by default are the result of a contest that received 7215 votes. More details can be found [here](https://github.com/oobabooga/oobabooga.github.io/blob/main/arena/results.md).
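
As an illustration, a preset file such as `presets/my_preset.yaml` might set a handful of sampling parameters like these (the parameter names below are an assumption, not taken from this README):

```
temperature: 0.7
top_p: 0.9
top_k: 40
repetition_penalty: 1.15
```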

## Contributing

If you would like to contribute to the project, check out the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).

## Community

* Subreddit: https://www.reddit.com/r/oobabooga/
* Discord: https://discord.gg/jwZCF2dPQN

## Acknowledgment

In August 2023, [Andreessen Horowitz](https://a16z.com/) (a16z) provided a generous grant to encourage and support my independent work on this project. I am **extremely** grateful for their trust and recognition, which will allow me to dedicate more time towards realizing the full potential of text-generation-webui.

spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/gui/ui_model.py
DELETED
@@ -1,290 +0,0 @@
from gui.ui_win import Ui_Form
from gui.ui_draw import *  # QtWidgets, QtGui, QtCore and painter are expected to come from this star import
from PIL import Image, ImageQt
import numpy as np
import random, io, os
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from util import task, util
from dataloader.image_folder import make_dataset
from dataloader.data_loader import get_transform
from model import create_model


class ui_model(QtWidgets.QWidget, Ui_Form):
    """Define the UI class"""
    shape = 'line'
    CurrentWidth = 1

    def __init__(self, opt):
        super(ui_model, self).__init__()

        self.setupUi(self)

        self.opt = opt
        self.show_result_flag = False
        self.mask_type = None
        self.img_power = None
        self.model_names = ['celeba', 'ffhq', 'imagenet', 'places2']
        self.img_root = './examples/'
        self.img_files = ['celeba/img', 'ffhq/img', 'imagenet/img', 'places2/img']

        self.show_logo()

        self.comboBox.activated.connect(self.load_model)  # select model
        self.pushButton_2.clicked.connect(self.select_image)  # manually select an image
        self.pushButton_3.clicked.connect(self.random_image)  # randomly select an image
        self.pushButton_4.clicked.connect(self.load_mask)  # manually select a mask
        self.pushButton_5.clicked.connect(self.random_mask)  # randomly select a mask

        # draw/erase the mask
        self.radioButton.toggled.connect(lambda: self.draw_mask('line'))  # draw a line
        self.radioButton_2.toggled.connect(lambda: self.draw_mask('rectangle'))  # draw a rectangle
        self.radioButton_3.toggled.connect(lambda: self.draw_mask('center'))  # center mask
        self.spinBox.valueChanged.connect(self.change_thickness)
        self.pushButton.clicked.connect(self.clear_mask)

        # fill image
        self.pushButton_6.clicked.connect(self.fill_image)
        self.comboBox_2.activated.connect(self.show_result)
        self.pushButton_7.clicked.connect(self.save_result)

        opt.preprocess = 'scale_shortside'
        self.transform_o = get_transform(opt, convert=False, augment=False)
        self.pil2tensor = transforms.ToTensor()

    def show_logo(self):
        """Show the logos of NTU and BTC"""
        img = QtWidgets.QLabel(self)
        img.setGeometry(1000, 10, 140, 50)

        pixmap = QtGui.QPixmap("./gui/logo/NTU_logo.jpg")  # load the logo image
        pixmap = pixmap.scaled(140, 140, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        img.setPixmap(pixmap)
        img.show()
        img1 = QtWidgets.QLabel(self)
        img1.setGeometry(1200, 10, 70, 50)

        pixmap1 = QtGui.QPixmap("./gui/logo/BTC_logo.png")  # load the logo image
        pixmap1 = pixmap1.scaled(70, 70, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        img1.setPixmap(pixmap1)
        img1.show()

    def show_image(self, img):
        """Show the (possibly masked) image"""
        show_img = img.copy()
        if self.mask_type == 'center':
            sub_img = Image.fromarray(np.uint8(255 * np.ones((int(self.pw/2), int(self.pw/2), 3))))
            mask = Image.fromarray(np.uint8(255 * np.ones((int(self.pw/2), int(self.pw/2)))))
            show_img.paste(sub_img, box=(int(self.pw/4), int(self.pw/4)), mask=mask)
        elif self.mask_type == 'external':
            mask = Image.open(self.mname).resize(self.img_power.size).convert('RGB')
            mask_L = Image.open(self.mname).resize(self.img_power.size).convert('L')
            show_img = Image.composite(mask, show_img, mask_L)
        self.new_painter(ImageQt.ImageQt(show_img))

    def show_result(self):
        """Show the different kinds of results"""
        value = self.comboBox_2.currentIndex()
        if value == 0:
            self.new_painter(ImageQt.ImageQt(self.img_power))
        elif value == 1:
            masked_img = torch.where(self.mask > 0, self.img_m, torch.ones_like(self.img_m))
            masked_img = Image.fromarray(util.tensor2im(masked_img.detach()))
            self.new_painter(ImageQt.ImageQt(masked_img))
        elif value == 2:
            if 'refine' in self.opt.coarse_or_refine:
                img_out = Image.fromarray(util.tensor2im(self.img_ref_out.detach()))
            else:
                img_out = Image.fromarray(util.tensor2im(self.img_out.detach()))
            self.new_painter(ImageQt.ImageQt(img_out))

    def save_result(self):
        """Save the results to disk"""
        util.mkdir(self.opt.results_dir)
        img_name = self.fname.split('/')[-1]
        data_name = self.opt.img_file.split('/')[-1].split('.')[0]

        # save the original image
        original_name = '%s_%s_%s' % ('original', data_name, img_name)
        original_path = os.path.join(self.opt.results_dir, original_name)
        img_original = util.tensor2im(self.img_truth)
        util.save_image(img_original, original_path)

        # save the mask
        mask_name = '%s_%s_%d_%s' % ('mask', data_name, self.PaintPanel.iteration, img_name)
        mask_path = os.path.join(self.opt.results_dir, mask_name)
        mask = self.mask.repeat(1, 3, 1, 1)
        img_mask = util.tensor2im(1-mask)
        util.save_image(img_mask, mask_path)

        # save the masked image
        masked_img_name = '%s_%s_%d_%s' % ('masked_img', data_name, self.PaintPanel.iteration, img_name)
        img_path = os.path.join(self.opt.results_dir, masked_img_name)
        img = torch.where(self.mask < 0.2, torch.ones_like(self.img_truth), self.img_truth)
        masked_img = util.tensor2im(img)
        util.save_image(masked_img, img_path)

        # save the generated results
        img_g_name = '%s_%s_%d_%s' % ('g', data_name, self.PaintPanel.iteration, img_name)
        img_path = os.path.join(self.opt.results_dir, img_g_name)
        img_g = util.tensor2im(self.img_g)
        util.save_image(img_g, img_path)

        # save the results
        result_name = '%s_%s_%d_%s' % ('out', data_name, self.PaintPanel.iteration, img_name)
        result_path = os.path.join(self.opt.results_dir, result_name)
        img_result = util.tensor2im(self.img_out)
        util.save_image(img_result, result_path)

        # save the refined results
        if 'tc' in self.opt.model and 'refine' in self.opt.coarse_or_refine:
            result_name = '%s_%s_%d_%s' % ('ref', data_name, self.PaintPanel.iteration, img_name)
            result_path = os.path.join(self.opt.results_dir, result_name)
            img_result = util.tensor2im(self.img_ref_out)
            util.save_image(img_result, result_path)

    def load_model(self):
        """Load the different kinds of models"""
        value = self.comboBox.currentIndex()
        if value == 0:
            raise NotImplementedError("Please choose a model")
        else:
            index = value-1  # define the model type and dataset type
            self.opt.name = self.model_names[index]
            self.opt.img_file = self.img_root + self.img_files[index % len(self.img_files)]
            self.model = create_model(self.opt)
            self.model.setup(self.opt)

    def load_image(self, fname):
        """Load the image"""
        self.img_o = Image.open(fname).convert('RGB')
        self.ow, self.oh = self.img_o.size
        self.img_power = self.transform_o(self.img_o)
        self.pw, self.ph = self.img_power.size

        return self.img_power

    def select_image(self):
        """Manually select an image"""
        self.fname, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'select the image', self.opt.img_file, '*')
        img = self.load_image(self.fname)

        self.mask_type = 'none'
        self.show_image(img)

    def random_image(self):
        """Randomly load a test image"""
        image_paths, image_size = make_dataset(self.opt.img_file)
        item = random.randint(0, image_size-1)
        self.fname = image_paths[item]
        img = self.load_image(self.fname)

        self.mask_type = 'none'
        self.show_image(img)

    def load_mask(self):
        """Load a mask"""
        self.mask_type = 'external'
        self.mname, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'select the mask', self.opt.mask_file, '*')

        self.show_image(self.img_power)

    def random_mask(self):
        """Randomly load a test mask"""
        if self.opt.mask_file == 'none':
            raise NotImplementedError("Please input the mask path")
        self.mask_type = 'external'
        mask_paths, mask_size = make_dataset(self.opt.mask_file)
        item = random.randint(0, mask_size - 1)
        self.mname = mask_paths[item]

        self.show_image(self.img_power)

    def read_mask(self):
        """Read the mask from the painted panel"""
        self.PaintPanel.saveDraw()
        buffer = QtCore.QBuffer()
        buffer.open(QtCore.QBuffer.ReadWrite)
        self.PaintPanel.map.save(buffer, 'PNG')
        pil_im = Image.open(io.BytesIO(buffer.data()))

        return pil_im

    def new_painter(self, image=None):
        """Build a painter to load and process the image"""
        # painter
        self.PaintPanel = painter(self, image)
        self.PaintPanel.close()
        if image is not None:
            w, h = image.size().width(), image.size().height()
            self.stackedWidget.setGeometry(QtCore.QRect(250+int(512-w/2), 100+int(128-h/8), w, h))
        self.stackedWidget.insertWidget(0, self.PaintPanel)
        self.stackedWidget.setCurrentWidget(self.PaintPanel)

    def change_thickness(self, num):
        """Change the width of the painter"""
        self.CurrentWidth = num
        self.PaintPanel.CurrentWidth = num

    def draw_mask(self, masktype):
        """Draw the mask"""
        if masktype == 'center':
            self.mask_type = 'center'
            if self.img_power is not None:
                self.show_image(self.img_power)
        else:
            self.mask_type = 'draw'
            self.shape = masktype
            self.PaintPanel.shape = masktype

    def clear_mask(self):
        """Clear the mask"""
        self.mask_type = 'draw'
        if self.PaintPanel.Brush:
            self.PaintPanel.Brush = False
        else:
            self.PaintPanel.Brush = True

    def set_input(self):
        """Set the input for the network"""
        img_o = self.pil2tensor(self.img_o).unsqueeze(0)
        img = self.pil2tensor(self.img_power).unsqueeze(0)
        if self.mask_type == 'draw':
            # get the test mask from the painter
            mask = self.read_mask()
            mask = torch.autograd.Variable(self.pil2tensor(mask)).unsqueeze(0)[:, 0:1, :, :]
        elif self.mask_type == 'center':
            mask = torch.zeros_like(img)[:, 0:1, :, :]
            mask[:, :, int(self.pw/4):int(3*self.pw/4), int(self.ph/4):int(3*self.ph/4)] = 1
        elif self.mask_type == 'external':
            mask = self.pil2tensor(Image.open(self.mname).resize((self.pw, self.ph)).convert('L')).unsqueeze(0)
            mask = (mask < 0.5).float()
        if len(self.opt.gpu_ids) > 0:
            img = img.cuda(self.opt.gpu_ids[0])
            mask = mask.cuda(self.opt.gpu_ids[0])
            img_o = img_o.cuda(self.opt.gpu_ids[0])

        self.mask = mask
        self.img_org = img_o * 2 - 1
        self.img_truth = img * 2 - 1
        self.img_m = self.mask * self.img_truth

    def fill_image(self):
        """Forward pass to get the completed results"""
        self.set_input()
        if self.PaintPanel.iteration < 1:
            with torch.no_grad():
                fixed_img = F.interpolate(self.img_m, size=[self.opt.fixed_size, self.opt.fixed_size], mode='bicubic', align_corners=True).clamp(-1, 1)
                fixed_mask = (F.interpolate(self.mask, size=[self.opt.fixed_size, self.opt.fixed_size], mode='bicubic', align_corners=True) > 0.9).type_as(fixed_img)
                out, mask = self.model.netE(fixed_img, mask=fixed_mask, return_mask=True)
                out = self.model.netT(out, mask, bool_mask=False)
                self.img_g = self.model.netG(out)
                img_g_org = F.interpolate(self.img_g, size=self.img_truth.size()[2:], mode='bicubic', align_corners=True).clamp(-1, 1)
                self.img_out = self.mask * self.img_truth + (1 - self.mask) * img_g_org
                if 'refine' in self.opt.coarse_or_refine:
                    img_ref = self.model.netG_Ref(self.img_out, mask=self.mask)
                    self.img_ref_out = self.mask * self.img_truth + (1 - self.mask) * img_ref
            print('finish the completion')

        self.show_result_flag = True
        self.show_result()

spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/demo/create_coco_dataset.py
DELETED
@@ -1,83 +0,0 @@
import typer
from groundingdino.util.inference import load_model, load_image, predict
from tqdm import tqdm
import torchvision
import torch
import fiftyone as fo


def main(
        image_directory: str = 'test_grounding_dino',
        text_prompt: str = 'bus, car',
        box_threshold: float = 0.15,
        text_threshold: float = 0.10,
        export_dataset: bool = False,
        view_dataset: bool = False,
        export_annotated_images: bool = True,
        weights_path: str = "groundingdino_swint_ogc.pth",
        config_path: str = "../../GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
        subsample: int = None,
        ):

    model = load_model(config_path, weights_path)

    dataset = fo.Dataset.from_images_dir(image_directory)

    samples = []

    if subsample is not None:

        if subsample < len(dataset):
            dataset = dataset.take(subsample).clone()

    for sample in tqdm(dataset):

        image_source, image = load_image(sample.filepath)

        boxes, logits, phrases = predict(
            model=model,
            image=image,
            caption=text_prompt,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
        )

        detections = []

        for box, logit, phrase in zip(boxes, logits, phrases):

            rel_box = torchvision.ops.box_convert(box, 'cxcywh', 'xywh')

            detections.append(
                fo.Detection(
                    label=phrase,
                    bounding_box=rel_box,
                    confidence=logit,
                ))

        # Store detections in a field name of your choice
        sample["detections"] = fo.Detections(detections=detections)
        sample.save()

    # launches the FiftyOne app, ready for viewing the dataset
    if view_dataset:
        session = fo.launch_app(dataset)
        session.wait()

    # exports a COCO dataset ready for training
    if export_dataset:
        dataset.export(
            'coco_dataset',
            dataset_type=fo.types.COCODetectionDataset,
        )

    # saves bounding boxes plotted on the input images to disk
    if export_annotated_images:
        dataset.draw_labels(
            'images_with_bounding_boxes',
            label_fields=['detections']
        )


if __name__ == '__main__':
    typer.run(main)

spaces/Audio-AGI/WavJourney/pipeline.py
DELETED
@@ -1,229 +0,0 @@
import datetime
import os
from string import Template
import openai
import re
import glob
import pickle
import time
import json5
from retrying import retry
from code_generator import check_json_script, collect_and_check_audio_data
import random
import string

import utils
import voice_presets
from code_generator import AudioCodeGenerator

# Enable this for debugging
USE_OPENAI_CACHE = False
openai_cache = []
if USE_OPENAI_CACHE:
    os.makedirs('cache', exist_ok=True)
    for cache_file in glob.glob('cache/*.pkl'):
        with open(cache_file, 'rb') as file:
            openai_cache.append(pickle.load(file))

def chat_with_gpt(prompt, api_key):
    if USE_OPENAI_CACHE:
        filtered_object = list(filter(lambda x: x['prompt'] == prompt, openai_cache))
        if len(filtered_object) > 0:
            response = filtered_object[0]['response']
            return response

    try:
        openai.api_key = api_key
        chat = openai.ChatCompletion.create(
            # model="gpt-3.5-turbo",
            model="gpt-4",
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        )
    finally:
        openai.api_key = ''

    if USE_OPENAI_CACHE:
        cache_obj = {
            'prompt': prompt,
            'response': chat['choices'][0]['message']['content']
        }
        with open(f'cache/{time.time()}.pkl', 'wb') as _openai_cache:
            pickle.dump(cache_obj, _openai_cache)
        openai_cache.append(cache_obj)

    return chat['choices'][0]['message']['content']


def get_file_content(filename):
    with open(filename, 'r') as file:
        return file.read().strip()


def write_to_file(filename, content):
    with open(filename, 'w') as file:
        file.write(content)


def extract_substring_with_quotes(input_string, quotes="'''"):
    pattern = f"{quotes}(.*?){quotes}"
    matches = re.findall(pattern, input_string, re.DOTALL)
    return matches


def try_extract_content_from_quotes(content):
    if "'''" in content:
        return extract_substring_with_quotes(content)[0]
    elif "```" in content:
        return extract_substring_with_quotes(content, quotes="```")[0]
    else:
        return content

def maybe_get_content_from_file(content_or_filename):
    if os.path.exists(content_or_filename):
        with open(content_or_filename, 'r') as file:
            return file.read().strip()
    return content_or_filename


# Pipeline Interface Guidelines:
#
# Init calls:
# - Init calls must be made before running the actual steps
# - init_session() is called every time a gradio webpage is loaded
#
# Single Step:
# - takes input (file or content) and an output path as input
# - most of the time just returns output content
#
# Compositional Step:
# - takes session_id as input (if you have the session_id, you have all the paths)
# - runs a series of steps

# This is called for every new gradio webpage

def init_session(session_id=''):
    def uid8():
        return ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))

    if session_id == '':
        session_id = f'{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}_{uid8()}'
        # create the paths
        os.makedirs(utils.get_session_voice_preset_path(session_id))
        os.makedirs(utils.get_session_audio_path(session_id))
        print(f'New session created, session_id={session_id}')
    return session_id

@retry(stop_max_attempt_number=3)
def input_text_to_json_script_with_retry(complete_prompt_path, api_key):
    print("    trying ...")
    complete_prompt = get_file_content(complete_prompt_path)
    json_response = try_extract_content_from_quotes(chat_with_gpt(complete_prompt, api_key))
    json_data = json5.loads(json_response)

    try:
        check_json_script(json_data)
        collect_and_check_audio_data(json_data)
    except Exception as err:
        print(f'JSON ERROR: {err}')
        retry_complete_prompt = f'{complete_prompt}\n```\n{json_response}```\nThe script above has format error(s). Return the fixed script.\n\nScript:\n'
        write_to_file(complete_prompt_path, retry_complete_prompt)
        raise err

    return json_response

# Step 1: input_text to json
def input_text_to_json_script(input_text, output_path, api_key):
    input_text = maybe_get_content_from_file(input_text)
    text_to_audio_script_prompt = get_file_content('prompts/text_to_json.prompt')
    prompt = f'{text_to_audio_script_prompt}\n\nInput text: {input_text}\n\nScript:\n'
    complete_prompt_path = output_path / 'complete_input_text_to_audio_script.prompt'
    write_to_file(complete_prompt_path, prompt)
    audio_script_response = input_text_to_json_script_with_retry(complete_prompt_path, api_key)
    generated_audio_script_filename = output_path / 'audio_script.json'
    write_to_file(generated_audio_script_filename, audio_script_response)
    return audio_script_response

# Step 2: json to char-voice map
def json_script_to_char_voice_map(json_script, voices, output_path, api_key):
    json_script_content = maybe_get_content_from_file(json_script)
    prompt = get_file_content('prompts/audio_script_to_character_voice_map.prompt')
    presets_str = '\n'.join(f"{preset['id']}: {preset['desc']}" for preset in voices.values())
    prompt = Template(prompt).substitute(voice_and_desc=presets_str)
    prompt = f"{prompt}\n\nAudio script:\n'''\n{json_script_content}\n'''\n\noutput:\n"
    write_to_file(output_path / 'complete_audio_script_to_char_voice_map.prompt', prompt)
    char_voice_map_response = try_extract_content_from_quotes(chat_with_gpt(prompt, api_key))
    char_voice_map = json5.loads(char_voice_map_response)
    # enrich char_voice_map with voice preset metadata
    complete_char_voice_map = {c: voices[char_voice_map[c]] for c in char_voice_map}
    char_voice_map_filename = output_path / 'character_voice_map.json'
    write_to_file(char_voice_map_filename, json5.dumps(complete_char_voice_map))
    return complete_char_voice_map

# Step 3: json to py code
def json_script_and_char_voice_map_to_audio_gen_code(json_script_filename, char_voice_map_filename, output_path, result_filename):
    audio_code_generator = AudioCodeGenerator()
    code = audio_code_generator.parse_and_generate(
        json_script_filename,
        char_voice_map_filename,
        output_path,
        result_filename
    )
    write_to_file(output_path / 'audio_generation.py', code)

# Step 4: py code to final wav
def audio_code_gen_to_result(audio_gen_code_path):
    audio_gen_code_filename = audio_gen_code_path / 'audio_generation.py'
    os.system(f'PYTHONPATH=. python {audio_gen_code_filename}')

# Function call used by Gradio: input_text to json
def generate_json_file(session_id, input_text, api_key):
    output_path = utils.get_session_path(session_id)
    # Step 1
    print(f'session_id={session_id}, Step 1: Writing audio script based on text: {input_text} ...')
    return input_text_to_json_script(input_text, output_path, api_key)

# Function call used by Gradio: json to result wav
def generate_audio(session_id, json_script, api_key):
    def count_lines(content):
        # Split the string using the newline character and count the non-empty lines
        return sum(1 for line in content.split('\n') if line.strip())

    max_lines = utils.get_max_script_lines()
    if count_lines(json_script) > max_lines:
        raise ValueError(f'The number of lines of the JSON script has exceeded {max_lines}!')

    output_path = utils.get_session_path(session_id)
    output_audio_path = utils.get_session_audio_path(session_id)
    voices = voice_presets.get_merged_voice_presets(session_id)

    # Step 2
    print(f'session_id={session_id}, Step 2: Parsing character voice with LLM...')
    char_voice_map = json_script_to_char_voice_map(json_script, voices, output_path, api_key)
    # Step 3
    json_script_filename = output_path / 'audio_script.json'
    char_voice_map_filename = output_path / 'character_voice_map.json'
    result_wav_basename = f'res_{session_id}'
    print(f'session_id={session_id}, Step 3: Compiling audio script to Python program ...')
    json_script_and_char_voice_map_to_audio_gen_code(json_script_filename, char_voice_map_filename, output_path, result_wav_basename)
    # Step 4
    print(f'session_id={session_id}, Step 4: Start running Python program ...')
    audio_code_gen_to_result(output_path)

    result_wav_filename = output_audio_path / f'{result_wav_basename}.wav'
    print(f'Done all processes, result: {result_wav_filename}')
    return result_wav_filename, char_voice_map

# Convenient function call used by wavjourney_cli
def full_steps(session_id, input_text, api_key):
    json_script = generate_json_file(session_id, input_text, api_key)
    return generate_audio(session_id, json_script, api_key)

spaces/Awiny/Image2Paragraph/models/grit_src/grit/config.py
DELETED
@@ -1,50 +0,0 @@
from detectron2.config import CfgNode as CN


def add_grit_config(cfg):
    _C = cfg

    _C.MODEL.BEAM_SIZE = 1
    _C.MODEL.TRAIN_TASK = ["ObjectDet", "DenseCap"]
    _C.MODEL.TEST_TASK = "DenseCap"  # This can be varied if the model is jointly trained on multiple tasks

    _C.MODEL.ROI_BOX_HEAD.USE_BIAS = 0.0  # >= 0: not use
    _C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False

    _C.MODEL.ROI_HEADS.MASK_WEIGHT = 1.0
    _C.MODEL.ROI_HEADS.OBJECT_FEAT_POOLER_RES = 14
    _C.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False

    # Backbones
    _C.MODEL.VIT_LAYERS = 12

    # Text Decoder
    _C.TEXT_DECODER = CN()
    _C.TEXT_DECODER.VOCAB_SIZE = 30522
    _C.TEXT_DECODER.HIDDEN_SIZE = 768
    _C.TEXT_DECODER.NUM_LAYERS = 6
    _C.TEXT_DECODER.ATTENTION_HEADS = 12
    _C.TEXT_DECODER.FEEDFORWARD_SIZE = 768 * 4

    # Multi-dataset dataloader
    _C.DATALOADER.DATASET_RATIO = [1, 1]  # sample ratio
    _C.DATALOADER.DATASET_BS = 1
    _C.DATALOADER.DATASET_INPUT_SIZE = [1024, 1024]
    _C.DATALOADER.DATASET_INPUT_SCALE = [(0.1, 2.0), (0.1, 2.0)]
    _C.DATALOADER.DATASET_MIN_SIZES = [(640, 800), (640, 800)]
    _C.DATALOADER.DATASET_MAX_SIZES = [1333, 1333]

    _C.SOLVER.USE_CUSTOM_SOLVER = True
    _C.SOLVER.OPTIMIZER = 'ADAMW'
    _C.SOLVER.VIT_LAYER_DECAY = True
    _C.SOLVER.VIT_LAYER_DECAY_RATE = 0.7

    _C.INPUT.CUSTOM_AUG = 'EfficientDetResizeCrop'
    _C.INPUT.TRAIN_SIZE = 1024
    _C.INPUT.TEST_SIZE = 1024
    _C.INPUT.SCALE_RANGE = (0.1, 2.)
    # 'default' for fixed short / long edge
    _C.INPUT.TEST_INPUT_TYPE = 'default'

    _C.FIND_UNUSED_PARAM = True
    _C.USE_ACT_CHECKPOINT = True
spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/custom_dataset_mapper.py
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/data/custom_dataset_mapper.py
|
3 |
-
import copy
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
|
7 |
-
from detectron2.config import configurable
|
8 |
-
|
9 |
-
from detectron2.data import detection_utils as utils
|
10 |
-
from detectron2.data import transforms as T
|
11 |
-
from detectron2.data.dataset_mapper import DatasetMapper
|
12 |
-
from .custom_build_augmentation import build_custom_augmentation
|
13 |
-
from itertools import compress
|
14 |
-
import logging

__all__ = ["CustomDatasetMapper", "ObjDescription"]
logger = logging.getLogger(__name__)


class CustomDatasetMapper(DatasetMapper):
    @configurable
    def __init__(self, is_train: bool,
                 dataset_augs=[],
                 **kwargs):
        if is_train:
            self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs]
        super().__init__(is_train, **kwargs)

    @classmethod
    def from_config(cls, cfg, is_train: bool = True):
        ret = super().from_config(cfg, is_train)
        if is_train:
            if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
                dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE
                dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE
                ret['dataset_augs'] = [
                    build_custom_augmentation(cfg, True, scale, size) \
                        for scale, size in zip(dataset_scales, dataset_sizes)]
            else:
                assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge'
                min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES
                max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES
                ret['dataset_augs'] = [
                    build_custom_augmentation(
                        cfg, True, min_size=mi, max_size=ma) \
                        for mi, ma in zip(min_sizes, max_sizes)]
        else:
            ret['dataset_augs'] = []

        return ret

    def __call__(self, dataset_dict):
        dataset_dict_out = self.prepare_data(dataset_dict)

        # When the augmented image is too small, redo the augmentation
        retry = 0
        while (dataset_dict_out["image"].shape[1] < 32 or dataset_dict_out["image"].shape[2] < 32):
            retry += 1
            if retry == 100:
                logger.info('Retried augmentation 100 times. Make sure the image size is not too small.')
                logger.info('Find image information below')
                logger.info(dataset_dict)
            dataset_dict_out = self.prepare_data(dataset_dict)

        return dataset_dict_out

    def prepare_data(self, dataset_dict_in):
        dataset_dict = copy.deepcopy(dataset_dict_in)
        if 'file_name' in dataset_dict:
            ori_image = utils.read_image(
                dataset_dict["file_name"], format=self.image_format)
        else:
            ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]]
            ori_image = utils._apply_exif_orientation(ori_image)
            ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format)
        utils.check_image_size(dataset_dict, ori_image)

        aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=None)
        if self.is_train:
            transforms = \
                self.dataset_augs[dataset_dict['dataset_source']](aug_input)
        else:
            transforms = self.augmentations(aug_input)
        image, sem_seg_gt = aug_input.image, aug_input.sem_seg

        image_shape = image.shape[:2]
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            if len(dataset_dict["annotations"]) > 0:
                object_descriptions = [an['object_description'] for an in dataset_dict["annotations"]]
            else:
                object_descriptions = []
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.use_instance_mask:
                    anno.pop("segmentation", None)
                if not self.use_keypoint:
                    anno.pop("keypoints", None)

            all_annos = [
                (utils.transform_instance_annotations(
                    obj, transforms, image_shape,
                    keypoint_hflip_indices=self.keypoint_hflip_indices,
                ), obj.get("iscrowd", 0))
                for obj in dataset_dict.pop("annotations")
            ]
            annos = [ann[0] for ann in all_annos if ann[1] == 0]
            instances = utils.annotations_to_instances(
                annos, image_shape, mask_format=self.instance_mask_format
            )

            instances.gt_object_descriptions = ObjDescription(object_descriptions)

            del all_annos
            if self.recompute_boxes:
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)

        return dataset_dict


class ObjDescription:
    def __init__(self, object_descriptions):
        self.data = object_descriptions

    def __getitem__(self, item):
        assert type(item) == torch.Tensor
        assert item.dim() == 1
        if len(item) > 0:
            assert item.dtype == torch.int64 or item.dtype == torch.bool
            if item.dtype == torch.int64:
                return ObjDescription([self.data[x.item()] for x in item])
            elif item.dtype == torch.bool:
                return ObjDescription(list(compress(self.data, item)))

        return ObjDescription(list(compress(self.data, item)))

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return "ObjDescription({})".format(self.data)
spaces/AzumaSeren100/XuanShen-Bert-VITS2/README.md
DELETED
@@ -1,17 +0,0 @@
---
license: apache-2.0
title: Xuanshen-BERT-VITS2
sdk: gradio
emoji: 🚀
colorFrom: yellow
colorTo: red
pinned: false
---
---
license: apache-2.0
sdk: gradio
title: Seren10
emoji: 🏆
colorFrom: red
colorTo: red
pinned: false
spaces/Benson/text-generation/Examples/Apkue.md
DELETED
@@ -1,86 +0,0 @@

<h1>APKue: What Is It and How Do You Use It?</h1>
<p>Have you ever wanted to download an app or a game that isn't available on the Google Play Store? Or maybe you want to try an older or newer version of an app that isn't compatible with your device? If so, you may be interested in APKue, an alternative app store that lets you download all kinds of apps you can't find in the official store. In this article, we'll explain what APKue is, why you should use it, and how to use it to download apps and games on your Android device.</p>
<h2>Introduction</h2>
<h3>What is APKue?</h3>
<p>APKue is an application that lets you download and install Android apps and games from a third-party source. It is similar to other app stores such as Aptoide, Uptodown, or APKMirror, but it has some unique features that make it stand out. For example, APKue has a simple, easy-to-use interface, a large and up-to-date catalog of apps and games, and a fast, secure download process. You can also use APKue to update your existing apps, uninstall unwanted apps, and manage your downloads.</p>
<h2>apkue</h2><br /><p><b><b>Download File</b> ✺ <a href="https://bltlly.com/2v6K6s">https://bltlly.com/2v6K6s</a></b></p><br /><br />
<h3>Why use APKue?</h3>
<p>There are many reasons why you might want to use APKue instead of the Google Play Store. Here are some of them:</p>
<ul>
<li>You can access apps and games that are not available in your region or country.</li>
<li>You can download older or newer versions of apps and games that are not compatible with your device or that have bugs or issues.</li>
<li>You can try beta or modified versions of apps and games that have extra features or functions.</li>
<li>You can avoid ads, in-app purchases, or other restrictions that some apps and games have.</li>
<li>You can save storage space on your device by downloading only the APK files instead of the entire app package.</li>
</ul>

<h2>How to download and install APKue on your Android device</h2>
<h3>Step 1: Enable unknown sources</h3>
<p>Before you can install APKue on your device, you need to enable the option to install apps from unknown sources. This option is usually disabled by default for security reasons, but you can easily enable it by following these steps:</p>
<ol>
<li>Go to your device's settings and tap Security or Privacy.</li>
<li>Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.</li>
<li>A warning message will appear. Tap OK or Allow to confirm.</li>
</ol>
<p>You are now ready to install APKue on your device.</p>
<h3>Step 2: Download APKue from its official website</h3>
<p>The next step is to download the APKue APK file from its official website. You can do this by following these steps:</p>
<ol>
<li>Open your browser and go to [APKPure]( 1 ), the official APKue website.</li>
<li>Tap the Download button in the top-right corner of the screen.</li>
<li>A pop-up window will appear. Tap OK or Download to start the download process.</li> <h3>Step 3: Install APKue and launch it</h3>
<p>Once the download is complete, you can install APKue on your device by following these steps:</p>
<ol>
<li>Go to your device's file manager and locate the APKue APK file. It should be in the Downloads folder or in the folder you chose to save it in.</li>
<li>Tap the APK file and a pop-up window will appear. Tap Install to start the installation process.</li>
<li>Wait a few seconds until the installation finishes. Tap Open to launch APKue, or Done to exit.</li>
</ol>
<p>Congratulations! You have successfully installed APKue on your device. You can now use it to download the apps and games you want.</p>
<h2>How to use APKue to download apps and games</h2>
<h3>Step 1: Search for the app or game you want</h3>

<ol>
<li>Open APKue and tap the search icon in the top-right corner of the screen.</li>
<li>Type the name of the app or game you want into the search box and tap the magnifying glass icon.</li>
<li>A list of results will appear. You can filter them by category, popularity, rating, or update date.</li>
<li>Tap the app or game you want to download. You will see its details, screenshots, reviews, and versions.</li>
</ol>
<h3>Step 2: Choose the version and download it</h3>
<p>The next step is to choose the version of the app or game you want to download. You can do this by following these steps:</p>
<ol>
<li>Scroll down to the Versions section and tap See Available APKs.</li>
<li>A list of versions will appear. You can see their size, date, and compatibility.</li>
<li>Tap the version you want to download. A pop-up window will appear. Tap Download APK to start the download process.</li>
<li>A progress bar will show you the download status. You can pause, resume, or cancel it at any time.</li>
</ol>
<h3>Step 3: Install the app or game and enjoy it</h3>
<p>The final step is to install the app or game you downloaded and enjoy it on your device. You can do this by following these steps:</p>
<p></p>
<ol>
<li>Go to your device's file manager and locate the APK file of the app or game. It should be in the Downloads folder or in the folder you chose to save it in.</li>
<li>Tap the APK file and a pop-up window will appear. Tap Install to start the installation process.</li>
<li>Wait a few seconds until the installation finishes. Tap Open to launch the app or game, or Done to exit.</li>
</ol>
<p>That's it! You have successfully downloaded and installed an app or game using APKue. You can now enjoy it on your device.</p>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>

<h3>Call to action and final thoughts</h3>
<p>If you are looking for an alternative app store that lets you download all kinds of apps you can't find on the Google Play Store, then APKue is a great option for you. It is simple, fast, secure, and up to date. You can use it to access apps and games that are not available in your region or country; download older or newer versions of apps and games that are not compatible with your device or that have bugs or issues; try beta or modified versions of apps and games that have extra features or functions; avoid ads, in-app purchases, or other restrictions that some apps and games have; save storage space on your device by downloading only the APK files instead of the entire app package; update your existing apps; uninstall unwanted apps; and manage your downloads.</p>
<p>If you want to try APKue for yourself, you can download it from its official website [APKPure]. It is free and easy to use. Just remember to enable unknown sources before installing it, and scan any app or game you download with antivirus software before installing it. Also, be respectful of the developers and owners of the apps and games you download, and do not use them for illegal or unethical purposes.</p>
<p>Thank you for reading this article. We hope you learned something new and useful. If you have any questions, comments, or feedback, feel free to leave them below. We would love to hear from you. And if you liked this article, please share it with your friends and family who might be interested in APKue too. Happy downloading!</p>
<h2>Frequently asked questions</h2>
<p>Here are some frequently asked questions about APKue that may be helpful:</p>
<h4>Is APKue safe to use?</h4>

<h4>Is APKue legal to use?</h4>
<p>APKue is legal to use, since it does not host any apps or games on its own servers. It only provides links to download them from other sources. However, some of the apps and games you can download from APKue may be illegal or may infringe intellectual property rights, so you should always check the legality and legitimacy of the apps and games you download, and use them at your own risk.</p>
<h4>How can I update the apps and games I download from APKue?</h4>
<p>You can update the apps and games you download from APKue by using the app itself. APKue will notify you when a new version is available for any app or game you have downloaded. You can choose whether or not to update it. Alternatively, you can also check for updates manually by going to the app or game's page and tapping the Update button.</p>
<h4>How can I uninstall the apps and games I download from APKue?</h4>
<p>You can uninstall the apps and games you download from APKue through your device's settings or file manager. You can also use APKue to uninstall them. Just go to the app or game's page and tap the Uninstall button.</p>
<h4>How can I contact the developers or owners of the apps and games I download from APKue?</h4>
<p>You can contact the developers or owners of the apps and games you download from APKue by visiting their official websites or social media pages. You can usually find these links on the app or game's page on APKue. You can also leave a review or a comment on the app or game's page on APKue to share your feedback or report any issues.</p> 64aa2da5cf<br />
<br />
<br />
spaces/BetterAPI/BetterChat_new/src/lib/types/Settings.ts
DELETED
@@ -1,13 +0,0 @@
import type { Timestamps } from "./Timestamps";

export interface Settings extends Timestamps {
	sessionId: string;

	/**
	 * Note: Only conversations with this setting explicitly set to true should be shared.
	 *
	 * This setting is explicitly set to true when users accept the ethics modal.
	 * */
	shareConversationsWithModelAuthors: boolean;
	ethicsModalAcceptedAt: Date | null;
}
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py
DELETED
@@ -1,261 +0,0 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from typing import Tuple, Union

from .big5freq import (
    BIG5_CHAR_TO_FREQ_ORDER,
    BIG5_TABLE_SIZE,
    BIG5_TYPICAL_DISTRIBUTION_RATIO,
)
from .euckrfreq import (
    EUCKR_CHAR_TO_FREQ_ORDER,
    EUCKR_TABLE_SIZE,
    EUCKR_TYPICAL_DISTRIBUTION_RATIO,
)
from .euctwfreq import (
    EUCTW_CHAR_TO_FREQ_ORDER,
    EUCTW_TABLE_SIZE,
    EUCTW_TYPICAL_DISTRIBUTION_RATIO,
)
from .gb2312freq import (
    GB2312_CHAR_TO_FREQ_ORDER,
    GB2312_TABLE_SIZE,
    GB2312_TYPICAL_DISTRIBUTION_RATIO,
)
from .jisfreq import (
    JIS_CHAR_TO_FREQ_ORDER,
    JIS_TABLE_SIZE,
    JIS_TYPICAL_DISTRIBUTION_RATIO,
)
from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE


class CharDistributionAnalysis:
    ENOUGH_DATA_THRESHOLD = 1024
    SURE_YES = 0.99
    SURE_NO = 0.01
    MINIMUM_DATA_THRESHOLD = 3

    def __init__(self) -> None:
        # Mapping table to get frequency order from char order (get from
        # GetOrder())
        self._char_to_freq_order: Tuple[int, ...] = tuple()
        self._table_size = 0  # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence. See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self.typical_distribution_ratio = 0.0
        self._done = False
        self._total_chars = 0
        self._freq_chars = 0
        self.reset()

    def reset(self) -> None:
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._done = False
        self._total_chars = 0  # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._freq_chars = 0

    def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
        """feed a character with known length"""
        if char_len == 2:
            # we only care about 2-byte characters in our distribution analysis
            order = self.get_order(char)
        else:
            order = -1
        if order >= 0:
            self._total_chars += 1
            # order is valid
            if order < self._table_size:
                if 512 > self._char_to_freq_order[order]:
                    self._freq_chars += 1

    def get_confidence(self) -> float:
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
            return self.SURE_NO

        if self._total_chars != self._freq_chars:
            r = self._freq_chars / (
                (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
            )
            if r < self.SURE_YES:
                return r

        # normalize confidence (we don't want to be 100% sure)
        return self.SURE_YES

    def got_enough_data(self) -> bool:
        # It is not necessary to receive all data to draw conclusion.
        # For charset detection, a certain amount of data is enough
        return self._total_chars > self.ENOUGH_DATA_THRESHOLD

    def get_order(self, _: Union[bytes, bytearray]) -> int:
        # We do not handle characters based on the original encoding string,
        # but convert this encoding string to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.
        return -1


class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
        self._table_size = EUCTW_TABLE_SIZE
        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-TW encoding, we are interested
        #   first  byte range: 0xc4 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xC4:
            return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
        return -1


class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-KR encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xB0:
            return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
        return -1


class JOHABDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        first_char = byte_str[0]
        if 0x88 <= first_char < 0xD4:
            code = first_char * 256 + byte_str[1]
            return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
        return -1


class GB2312DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
        self._table_size = GB2312_TABLE_SIZE
        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for GB2312 encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if (first_char >= 0xB0) and (second_char >= 0xA1):
            return 94 * (first_char - 0xB0) + second_char - 0xA1
        return -1


class Big5DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
        self._table_size = BIG5_TABLE_SIZE
        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for big5 encoding, we are interested
        #   first  byte range: 0xa4 -- 0xfe
        #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if first_char >= 0xA4:
            if second_char >= 0xA1:
                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
            return 157 * (first_char - 0xA4) + second_char - 0x40
        return -1


class SJISDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for sjis encoding, we are interested
        #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        #   second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if 0x81 <= first_char <= 0x9F:
            order = 188 * (first_char - 0x81)
        elif 0xE0 <= first_char <= 0xEF:
            order = 188 * (first_char - 0xE0 + 31)
        else:
            return -1
        order = order + second_char - 0x40
        if second_char > 0x7F:
            order = -1
        return order


class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-JP encoding, we are interested
        #   first  byte range: 0xa0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        char = byte_str[0]
        if char >= 0xA0:
            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
        return -1
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_entry_points.py
DELETED
@@ -1,86 +0,0 @@
import functools
import operator
import itertools

from .extern.jaraco.text import yield_lines
from .extern.jaraco.functools import pass_none
from ._importlib import metadata
from ._itertools import ensure_unique
from .extern.more_itertools import consume


def ensure_valid(ep):
    """
    Exercise one of the dynamic properties to trigger
    the pattern match.
    """
    ep.extras


def load_group(value, group):
    """
    Given a value of an entry point or series of entry points,
    return each as an EntryPoint.
    """
    # normalize to a single sequence of lines
    lines = yield_lines(value)
    text = f'[{group}]\n' + '\n'.join(lines)
    return metadata.EntryPoints._from_text(text)


def by_group_and_name(ep):
    return ep.group, ep.name


def validate(eps: metadata.EntryPoints):
    """
    Ensure entry points are unique by group and name and validate each.
    """
    consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
    return eps


@functools.singledispatch
def load(eps):
    """
    Given a Distribution.entry_points, produce EntryPoints.
    """
    groups = itertools.chain.from_iterable(
        load_group(value, group)
        for group, value in eps.items())
    return validate(metadata.EntryPoints(groups))


@load.register(str)
def _(eps):
    r"""
    >>> ep, = load('[console_scripts]\nfoo=bar')
    >>> ep.group
    'console_scripts'
    >>> ep.name
    'foo'
    >>> ep.value
    'bar'
    """
    return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))


load.register(type(None), lambda x: x)


@pass_none
def render(eps: metadata.EntryPoints):
    by_group = operator.attrgetter('group')
    groups = itertools.groupby(sorted(eps, key=by_group), by_group)

    return '\n'.join(
        f'[{group}]\n{render_items(items)}\n'
        for group, items in groups
    )


def render_items(eps):
    return '\n'.join(
        f'{ep.name} = {ep.value}'
        for ep in sorted(eps)
    )
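
For reference, a self-contained sketch of the grouping idea behind render() and render_items(): sort entry points by group, group them, and print each group INI-style. EP here is a hypothetical stand-in for importlib.metadata.EntryPoint, so the snippet runs without any setuptools internals.

import itertools
import operator
from collections import namedtuple

EP = namedtuple("EP", "name value group")

eps = [
    EP("foo", "pkg.mod:main", "console_scripts"),
    EP("bar", "pkg.mod:other", "console_scripts"),
    EP("plug", "pkg.plugin", "myapp.plugins"),
]

by_group = operator.attrgetter("group")
for group, items in itertools.groupby(sorted(eps, key=by_group), by_group):
    print(f"[{group}]")
    for ep in sorted(items):           # namedtuples sort by (name, value, group)
        print(f"{ep.name} = {ep.value}")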
spaces/Biliovo/anime-remove-background/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Anime Remove Background
emoji: 🪄🖼️
colorFrom: indigo
colorTo: pink
sdk: gradio
sdk_version: 3.1.4
app_file: app.py
pinned: false
license: apache-2.0
duplicated_from: skytnt/anime-remove-background
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/demo/predictor.py
DELETED
@@ -1,220 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch

from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer


class VisualizationDemo(object):
    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)

    def run_on_image(self, image):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.

        Returns:
            predictions (dict): the output of the model.
            vis_output (VisImage): the visualized image output.
        """
        vis_output = None
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
        if "panoptic_seg" in predictions:
            panoptic_seg, segments_info = predictions["panoptic_seg"]
            vis_output = visualizer.draw_panoptic_seg_predictions(
                panoptic_seg.to(self.cpu_device), segments_info
            )
        else:
            if "sem_seg" in predictions:
                vis_output = visualizer.draw_sem_seg(
                    predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )
            if "instances" in predictions:
                instances = predictions["instances"].to(self.cpu_device)
                vis_output = visualizer.draw_instance_predictions(predictions=instances)

        return predictions, vis_output

    def _frame_from_video(self, video):
        while video.isOpened():
            success, frame = video.read()
            if success:
                yield frame
            else:
                break

    def run_on_video(self, video):
        """
        Visualizes predictions on frames of the input video.

        Args:
            video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
                either a webcam or a video file.

        Yields:
            ndarray: BGR visualizations of each video frame.
        """
        video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)

        def process_predictions(frame, predictions):
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            if "panoptic_seg" in predictions:
                panoptic_seg, segments_info = predictions["panoptic_seg"]
                vis_frame = video_visualizer.draw_panoptic_seg_predictions(
                    frame, panoptic_seg.to(self.cpu_device), segments_info
                )
            elif "instances" in predictions:
                predictions = predictions["instances"].to(self.cpu_device)
                vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
            elif "sem_seg" in predictions:
                vis_frame = video_visualizer.draw_sem_seg(
                    frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )

            # Converts Matplotlib RGB format to OpenCV BGR format
            vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
            return vis_frame

        frame_gen = self._frame_from_video(video)
        if self.parallel:
            buffer_size = self.predictor.default_buffer_size

            frame_data = deque()

            for cnt, frame in enumerate(frame_gen):
                frame_data.append(frame)
                self.predictor.put(frame)

                if cnt >= buffer_size:
                    frame = frame_data.popleft()
                    predictions = self.predictor.get()
                    yield process_predictions(frame, predictions)

            while len(frame_data):
                frame = frame_data.popleft()
                predictions = self.predictor.get()
                yield process_predictions(frame, predictions)
        else:
            for frame in frame_gen:
                yield process_predictions(frame, self.predictor(frame))


class AsyncPredictor:
    """
    A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
    this helps improve throughput when rendering videos.
    """

    class _StopToken:
        pass

    class _PredictWorker(mp.Process):
        def __init__(self, cfg, task_queue, result_queue):
            self.cfg = cfg
            self.task_queue = task_queue
            self.result_queue = result_queue
            super().__init__()

        def run(self):
            predictor = DefaultPredictor(self.cfg)

            while True:
                task = self.task_queue.get()
                if isinstance(task, AsyncPredictor._StopToken):
                    break
                idx, data = task
                result = predictor(data)
                self.result_queue.put((idx, result))

    def __init__(self, cfg, num_gpus: int = 1):
        """
        Args:
            cfg (CfgNode):
            num_gpus (int): if 0, will run on CPU
        """
        num_workers = max(num_gpus, 1)
        self.task_queue = mp.Queue(maxsize=num_workers * 3)
        self.result_queue = mp.Queue(maxsize=num_workers * 3)
        self.procs = []
        for gpuid in range(max(num_gpus, 1)):
            cfg = cfg.clone()
            cfg.defrost()
            cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
            self.procs.append(
                AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
            )

        self.put_idx = 0
        self.get_idx = 0
        self.result_rank = []
        self.result_data = []

        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)

    def put(self, image):
        self.put_idx += 1
        self.task_queue.put((self.put_idx, image))

    def get(self):
        self.get_idx += 1  # the index needed for this request
        if len(self.result_rank) and self.result_rank[0] == self.get_idx:
            res = self.result_data[0]
            del self.result_data[0], self.result_rank[0]
            return res

        while True:
            # make sure the results are returned in the correct order
            idx, res = self.result_queue.get()
            if idx == self.get_idx:
                return res
            insert = bisect.bisect(self.result_rank, idx)
            self.result_rank.insert(insert, idx)
            self.result_data.insert(insert, res)

    def __len__(self):
        return self.put_idx - self.get_idx

    def __call__(self, image):
        self.put(image)
        return self.get()

    def shutdown(self):
        for _ in self.procs:
            self.task_queue.put(AsyncPredictor._StopToken())

    @property
    def default_buffer_size(self):
        return len(self.procs) * 5
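
The subtle part of AsyncPredictor is get(): results can come back from the worker processes out of order, so any result that is not the one currently wanted is parked in a rank-sorted buffer (result_rank/result_data) until its index comes up. A standalone simulation of just that reordering logic (illustrative only; the queue traffic is faked with a list):

import bisect

arrivals = [(2, "r2"), (1, "r1"), (3, "r3")]   # (request idx, result), out of order
result_rank, result_data = [], []

for get_idx in range(1, 4):
    if result_rank and result_rank[0] == get_idx:   # a buffered result is next in line
        print(get_idx, result_data[0])
        del result_rank[0], result_data[0]
        continue
    while arrivals:                                 # stands in for result_queue.get()
        idx, res = arrivals.pop(0)
        if idx == get_idx:
            print(get_idx, res)
            break
        insert = bisect.bisect(result_rank, idx)    # park it, keeping ranks sorted
        result_rank.insert(insert, idx)
        result_data.insert(insert, res)

# prints "1 r1", "2 r2", "3 r3" -- in request order despite the shuffled arrivals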
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/batch_norm.py
DELETED
@@ -1,237 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.function import Function
from torch.nn import functional as F

from detectron2.utils import comm

from .wrappers import BatchNorm2d


class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.

    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.

    Other pre-trained backbone models may contain all 4 parameters.

    The forward is implemented by `F.batch_norm(..., training=False)`.
    """

    _version = 3

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features) - eps)

    def forward(self, x):
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provides more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            )

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # No running_mean/var in early versions
            # This will silence the warnings
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)

        if version is not None and version < 3:
            logger = logging.getLogger(__name__)
            logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
            # In version < 3, running_var are used without +eps.
            state_dict[prefix + "running_var"] -= self.eps

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)

    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """
        Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.

        Args:
            module (torch.nn.Module):

        Returns:
            If module is BatchNorm/SyncBatchNorm, returns a new module.
            Otherwise, in-place convert module and return it.

        Similar to convert_sync_batchnorm in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res


def get_norm(norm, out_channels):
    """
    Args:
        norm (str or callable):

    Returns:
        nn.Module or None: the normalization layer
    """
    if isinstance(norm, str):
        if len(norm) == 0:
            return None
        norm = {
            "BN": BatchNorm2d,
            "SyncBN": NaiveSyncBatchNorm,
            "FrozenBN": FrozenBatchNorm2d,
            "GN": lambda channels: nn.GroupNorm(32, channels),
            "nnSyncBN": nn.SyncBatchNorm,  # keep for debugging
        }[norm]
    return norm(out_channels)


class AllReduce(Function):
    @staticmethod
    def forward(ctx, input):
        input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
        # Use allgather instead of allreduce since I don't trust in-place operations ..
        dist.all_gather(input_list, input, async_op=False)
        inputs = torch.stack(input_list, dim=0)
        return torch.sum(inputs, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        dist.all_reduce(grad_output, async_op=False)
        return grad_output


class NaiveSyncBatchNorm(BatchNorm2d):
    """
    `torch.nn.SyncBatchNorm` has known unknown bugs.
    It produces significantly worse AP (and sometimes goes NaN)
    when the batch size on each worker is quite different
    (e.g., when scale augmentation is used, or when it is applied to mask head).

    Use this implementation before `nn.SyncBatchNorm` is fixed.
    It is slower than `nn.SyncBatchNorm`.

    Note:
        There isn't a single definition of Sync BatchNorm.

        When ``stats_mode==""``, this module computes overall statistics by using
        statistics of each worker with equal weight.  The result is true statistics
        of all samples (as if they are all on one worker) only when all workers
        have the same (N, H, W). This mode does not support inputs with zero batch size.

        When ``stats_mode=="N"``, this module computes overall statistics by weighting
        the statistics of each worker by their ``N``. The result is true statistics
        of all samples (as if they are all on one worker) only when all workers
        have the same (H, W). It is slower than ``stats_mode==""``.

        Even though the result of this module may not be the true statistics of all samples,
        it may still be reasonable because it might be preferable to assign equal weights
        to all workers, regardless of their (H, W) dimension, instead of putting larger weight
        on larger images. From preliminary experiments, little difference is found between such
        a simplified implementation and an accurate computation of overall mean & variance.
    """

    def __init__(self, *args, stats_mode="", **kwargs):
        super().__init__(*args, **kwargs)
        assert stats_mode in ["", "N"]
        self._stats_mode = stats_mode

    def forward(self, input):
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)

        B, C = input.shape[0], input.shape[1]

        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])

        if self._stats_mode == "":
            assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
            vec = torch.cat([mean, meansqr], dim=0)
            vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
            mean, meansqr = torch.split(vec, C)
            momentum = self.momentum
        else:
            if B == 0:
                vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
                vec = vec + input.sum()  # make sure there is gradient w.r.t input
            else:
                vec = torch.cat(
                    [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
                )
            vec = AllReduce.apply(vec * B)

            total_batch = vec[-1].detach()
            momentum = total_batch.clamp(max=1) * self.momentum  # no update if total_batch is 0
            total_batch = torch.max(total_batch, torch.ones_like(total_batch))  # avoid div-by-zero
            mean, meansqr, _ = torch.split(vec / total_batch, C)

        var = meansqr - mean * mean
        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)

        self.running_mean += momentum * (mean.detach() - self.running_mean)
        self.running_var += momentum * (var.detach() - self.running_var)
        return input * scale + bias
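
Both forward passes above fold frozen statistics into a single affine transform: with scale = weight / sqrt(var + eps) and bias = b - mean * scale, the expression x * scale + bias equals (x - mean) / sqrt(var + eps) * weight + b. A quick numerical check with made-up statistics (assumes only that torch is installed):

import torch

eps = 1e-5
x = torch.randn(2, 3, 4, 4)
weight, b = torch.randn(3), torch.randn(3)
mean, var = torch.randn(3), torch.rand(3) + 0.5

scale = (weight * (var + eps).rsqrt()).reshape(1, -1, 1, 1)
bias = (b - mean * weight * (var + eps).rsqrt()).reshape(1, -1, 1, 1)
fused = x * scale + bias

c = lambda t: t.reshape(1, -1, 1, 1)  # broadcast per-channel stats over NCHW
reference = (x - c(mean)) / torch.sqrt(c(var) + eps) * c(weight) + c(b)

assert torch.allclose(fused, reference, atol=1e-5)
print("fused affine form matches frozen batch norm")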
spaces/CVPR/LIVE/cdf.h
DELETED
@@ -1,29 +0,0 @@
#pragma once

#include "diffvg.h"

DEVICE int sample(const float *cdf, int num_entries, float u, float *updated_u = nullptr) {
    // Binary search the cdf
    auto lb = 0;
    auto len = num_entries - 1 - lb;
    while (len > 0) {
        auto half_len = len / 2;
        auto mid = lb + half_len;
        assert(mid >= 0 && mid < num_entries);
        if (u < cdf[mid]) {
            len = half_len;
        } else {
            lb = mid + 1;
            len = len - half_len - 1;
        }
    }
    lb = clamp(lb, 0, num_entries - 1);
    if (updated_u != nullptr) {
        if (lb > 0) {
            *updated_u = (u - cdf[lb - 1]) / (cdf[lb] - cdf[lb - 1]);
        } else {
            *updated_u = u / cdf[lb];
        }
    }
    return lb;
}
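
The sample() routine above is inverse-CDF sampling: binary-search the cumulative table for u, then rescale u so it can be reused as a fresh uniform number inside the chosen bin. A Python analogue of the same behavior (a sketch only, not part of this project), using bisect for the search:

import bisect

def sample_cdf(cdf, u):
    lb = bisect.bisect_right(cdf, u)        # first entry with cdf[i] > u
    lb = min(max(lb, 0), len(cdf) - 1)      # clamp, as the C++ code does
    lo = cdf[lb - 1] if lb > 0 else 0.0
    updated_u = (u - lo) / (cdf[lb] - lo)   # stretch u back onto [0, 1)
    return lb, updated_u

cdf = [0.1, 0.4, 1.0]         # three bins with probabilities 0.1, 0.3, 0.6
print(sample_cdf(cdf, 0.25))  # (1, 0.5): the middle bin, halfway into it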
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/minimum_type.h
DELETED
@@ -1,162 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/type_traits.h>

namespace thrust
{

namespace detail
{

namespace minimum_type_detail
{

//
// Returns the minimum type or is empty
// if T1 and T2 are unrelated.
//
template <typename T1, typename T2, bool GreaterEqual, bool LessEqual> struct minimum_type_impl {};

template <typename T1, typename T2>
struct minimum_type_impl<T1,T2,true,false>
{
  typedef T2 type;
}; // end minimum_type_impl

template <typename T1, typename T2>
struct minimum_type_impl<T1,T2,false,true>
{
  typedef T1 type;
}; // end minimum_type_impl

template <typename T1, typename T2>
struct minimum_type_impl<T1,T2,true,true>
{
  typedef T1 type;
}; // end minimum_type_impl

template <typename T1, typename T2>
struct primitive_minimum_type
  : minimum_type_detail::minimum_type_impl<
      T1,
      T2,
      ::thrust::detail::is_convertible<T1,T2>::value,
      ::thrust::detail::is_convertible<T2,T1>::value
    >
{
}; // end primitive_minimum_type

// because some types are not convertible (even to themselves)
// specialize primitive_minimum_type for when both types are identical
template <typename T>
struct primitive_minimum_type<T,T>
{
  typedef T type;
}; // end primitive_minimum_type

// XXX this belongs somewhere more general
struct any_conversion
{
  template<typename T> operator T (void);
};

} // end minimum_type_detail

template<typename T1,
         typename T2  = minimum_type_detail::any_conversion,
         typename T3  = minimum_type_detail::any_conversion,
         typename T4  = minimum_type_detail::any_conversion,
         typename T5  = minimum_type_detail::any_conversion,
         typename T6  = minimum_type_detail::any_conversion,
         typename T7  = minimum_type_detail::any_conversion,
         typename T8  = minimum_type_detail::any_conversion,
         typename T9  = minimum_type_detail::any_conversion,
         typename T10 = minimum_type_detail::any_conversion,
         typename T11 = minimum_type_detail::any_conversion,
         typename T12 = minimum_type_detail::any_conversion,
         typename T13 = minimum_type_detail::any_conversion,
         typename T14 = minimum_type_detail::any_conversion,
         typename T15 = minimum_type_detail::any_conversion,
         typename T16 = minimum_type_detail::any_conversion>
  struct minimum_type;

// base case
template<typename T1, typename T2>
struct minimum_type<T1,T2>
  : minimum_type_detail::primitive_minimum_type<T1,T2>
{};

template<typename T1, typename T2>
struct lazy_minimum_type
  : minimum_type<
      typename T1::type,
      typename T2::type
    >
{};

// carefully avoid referring to a nested ::type which may not exist
template<typename T1, typename T2, typename T3, typename T4,
         typename T5, typename T6, typename T7, typename T8,
         typename T9, typename T10, typename T11, typename T12,
         typename T13, typename T14, typename T15, typename T16>
  struct minimum_type
    : lazy_minimum_type<
        lazy_minimum_type<
          lazy_minimum_type<
            minimum_type<
              T1,T2
            >,
            minimum_type<
              T3,T4
            >
          >,
          lazy_minimum_type<
            minimum_type<
              T5,T6
            >,
            minimum_type<
              T7,T8
            >
          >
        >,
        lazy_minimum_type<
          lazy_minimum_type<
            minimum_type<
              T9,T10
            >,
            minimum_type<
              T11,T12
            >
          >,
          lazy_minimum_type<
            minimum_type<
              T13,T14
            >,
            minimum_type<
              T15,T16
            >
          >
        >
      >
{};

} // end detail

} // end thrust
|
162 |
-
|
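The metafunction above computes the "minimum" of up to sixteen types: the single type every argument is convertible to, found by reducing the arguments pairwise while lazy_minimum_type delays touching a nested ::type that may not exist. A rough Python analogue of that pairwise reduction (a sketch only, not Thrust's API; issubclass stands in for is_convertible, and all names here are made up):

from functools import reduce

def minimum_type(t1, t2):
    # Mirror of primitive_minimum_type: keep whichever type the other one
    # "converts" to; issubclass plays the role of is_convertible.
    if issubclass(t1, t2) and issubclass(t2, t1):
        return t1                      # same type (or mutually convertible)
    if issubclass(t1, t2):
        return t2                      # t1 -> t2 only: t2 is the common type
    if issubclass(t2, t1):
        return t1                      # t2 -> t1 only: t1 is the common type
    raise TypeError(f"no common type for {t1.__name__} and {t2.__name__}")

def minimum_type_n(*types):
    # Pairwise reduction over the argument pack, like the 16-parameter template.
    return reduce(minimum_type, types)

class Base: ...
class Derived(Base): ...

assert minimum_type_n(Derived, Base, Derived) is Base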
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/transform_scan.h
DELETED
@@ -1,22 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special version of this algorithm
-
spaces/ChandraMohanNayal/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md
DELETED
@@ -1,40 +0,0 @@
-<!-- ⚠️ At the moment any non-essential commands are not being merged.
-If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
-We are expecting to ship plugin support within the week (PR #757).
-Resources:
-* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
--->
-
-<!-- 📢 Announcement
-We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
-
-Focus on a single, specific change.
-Do not include any unrelated or "extra" modifications.
-Provide clear documentation and explanations of the changes made.
-Ensure diffs are limited to the intended lines: no applying preferred formatting styles or line endings (unless that's what the PR is about).
-For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
-
-By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
-
-### Background
-<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
-
-### Changes
-<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
-
-### Documentation
-<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
-
-### Test Plan
-<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
-
-### PR Quality Checklist
-- [ ] My pull request is atomic and focuses on a single change.
-- [ ] I have thoroughly tested my changes with multiple different prompts.
-- [ ] I have considered potential risks and mitigations for my changes.
-- [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
-
-<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
-
-<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
spaces/Chomkwoy/Nilkessye/image_text_align.py
DELETED
@@ -1,111 +0,0 @@
-import pathlib
-from typing import List
-
-import Levenshtein
-import numpy as np
-from PIL import Image
-from tqdm.auto import tqdm
-
-import load_book
-import model
-import ocr_utils
-from syllable_model import SyllableRecognizer
-
-
-def image_text_align(
-    filename: str,
-    sentences: List[dict],
-    cur_page: str,
-    dgju_dict: dict,
-    centernet: model.exkp,
-    recog: SyllableRecognizer,
-    display=None
-):
-    # Detect page
-    orig_image, orig_image_bbox, orig_size = load_book.process_page(filename, thresholding=False)
-    pred_syllables, line_infos = ocr_utils.recognize_page(orig_image, centernet, recog, return_line_infos=True)
-
-    pred_bboxes = [item for line in line_infos for item in line['line']]
-
-    # Parse ground truth text
-    cand_page_syllables = load_book.parse_book_text(sentences, cur_page, dgju_dict)
-
-    # Construct candidate expected texts
-    cand_expected_texts = []
-    for cand in cand_page_syllables:
-        expected_text = []
-        for syllable in cand:
-            if load_book.HANJA_RE.match(syllable['syllable']):
-                expected_text.append('〓')
-            elif syllable['syllable'] == '?' and len(syllable['possibilities']) > 0:
-                expected_text.append(syllable['possibilities'][0])
-            else:
-                expected_text.append(syllable['syllable'])
-        cand_expected_texts.append(expected_text)
-
-    if display is not None:
-        print("gt  =", '.'.join(cand_expected_texts[0]))
-        print("pred=", '.'.join(pred_syllables))
-
-    # Find out which one is correct
-    pred_text = '.'.join(pred_syllables)
-    leven_dists = [
-        Levenshtein.distance(pred_text, '.'.join(cand))
-        for cand in cand_expected_texts
-    ]
-    gt_idx = np.argmin(leven_dists)
-    gt_syllables = cand_page_syllables[gt_idx]
-
-    avg_dist = leven_dists[gt_idx] / len(pred_syllables)
-    if avg_dist > 2.0:
-        print('WARNING: average levenshtein dist > 2.0')
-        return False
-
-    # Align text
-    expected_text = cand_expected_texts[gt_idx]
-    pred_syll_to_gt_syll = load_book.match_syllables(pred_syllables, expected_text)
-
-    # Align text & image
-    for pred_syll_idx, (pred, (bbox, _, _, cls)) in enumerate(zip(tqdm(pred_syllables), pred_bboxes)):
-        (tlx, tly), (brx, bry) = bbox
-        w, h = brx - tlx, bry - tly
-        pw, ph = w / 5, h / 5
-        tile = orig_image[
-            max(0, int(tly - ph)):min(orig_image.shape[0], int(bry + ph)),
-            max(0, int(tlx - pw)):min(orig_image.shape[1], int(brx + pw)),
-        ]
-
-        # Find corresponding ground truth syllable
-        gt_syll_idx = pred_syll_to_gt_syll[pred_syll_idx]
-        if gt_syll_idx is None:
-            continue
-        gt = gt_syllables[gt_syll_idx]
-
-        if load_book.HANJA_RE.match(gt['syllable']):
-            possibilities = [gt['syllable']]
-        elif 'possibilities' in gt:
-            possibilities = gt['possibilities']
-        else:
-            possibilities = [gt['syllable'] + 'L', gt['syllable'] + 'H', gt['syllable'] + 'R']
-
-        # Skip unknown syllables
-        if load_book.HANJA_RE.match(gt['syllable']) or (gt['syllable'] == '?' and len(gt['possibilities']) == 0):
-            continue
-
-        # Display syllable
-        if display is not None:
-            print(pred, possibilities)
-            display(Image.fromarray(tile))
-
-        # Predict syllable
-        losses = recog.loss([tile] * len(possibilities), possibilities).numpy()
-        pred_idx = np.argmin(losses)
-        pred_output = possibilities[pred_idx]
-
-        # Save image
-        page_id = filename.replace('/', '_').split('.')[0]
-        out_path = pathlib.Path(f"real_syllables/{page_id}/{pred_output}_{page_id}_i{pred_syll_idx}.png")
-        out_path.parent.mkdir(parents=True, exist_ok=True)
-        Image.fromarray(tile).save(out_path)
-
-    return True
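The pivotal step in image_text_align is deciding which candidate page transcription the OCR output actually corresponds to: join the predicted syllables with '.', take the candidate with the smallest Levenshtein distance, and reject the page when the per-syllable distance is too large. A self-contained sketch of just that step (the example syllables are made up; the 2.0 threshold is the one used above):

import Levenshtein
import numpy as np

pred_syllables = ["na", "rat", "mal", "ssa", "mi"]   # OCR output (made up)
candidates = [
    ["na", "rat", "mal", "ssa", "mi"],               # candidate page A
    ["dyung", "gwik", "e", "dal", "a"],              # candidate page B
]

pred_text = ".".join(pred_syllables)
leven_dists = [Levenshtein.distance(pred_text, ".".join(c)) for c in candidates]
gt_idx = int(np.argmin(leven_dists))
avg_dist = leven_dists[gt_idx] / len(pred_syllables)

print(gt_idx, avg_dist)   # best candidate and its per-syllable distance
assert avg_dist <= 2.0    # otherwise the page would be rejected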
spaces/Comet/txt2im-models/comet.py
DELETED
@@ -1,76 +0,0 @@
-import comet_ml
-
-
-def start_experiment(
-    comet_api_key,
-    comet_workspace,
-    comet_project_name,
-    comet_experiment_name,
-    experiment,
-):
-    if comet_api_key is None:
-        experiment = None
-        return (
-            experiment,
-            """
-            Please add your API key in order to log your predictions to a Comet Experiment.
-            If you don't have a Comet account yet, you can sign up using the link below:
-
-            https://www.comet.ml/signup
-            """,
-        )
-
-    try:
-        if comet_experiment_name:
-            # Retrieve the Experiment if it already exists
-            api_experiment = get_experiment(
-                {
-                    "api_key": comet_api_key,
-                    "workspace": comet_workspace,
-                    "project_name": comet_project_name,
-                    "experiment": comet_experiment_name,
-                }
-            )
-        else:
-            # Create a new Experiment
-            api_experiment = comet_ml.APIExperiment(
-                api_key=comet_api_key,
-                workspace=comet_workspace,
-                project_name=comet_project_name,
-            )
-            api_experiment.log_other("Created from", "Spaces")
-
-        experiment = {
-            "api_key": comet_api_key,
-            "workspace": comet_workspace,
-            "project_name": comet_project_name,
-            "experiment": api_experiment.name,
-        }
-
-        return experiment, f"Started {api_experiment.name}. Happy logging!😊"
-
-    except Exception as e:
-        return None, e
-
-
-def get_experiment(experiment_state):
-    try:
-        api_key = experiment_state.get("api_key")
-        workspace = experiment_state.get("workspace")
-        project = experiment_state.get("project_name")
-        experiment_name = experiment_state.get("experiment")
-
-        return comet_ml.API(api_key=api_key).get_experiment(
-            workspace=workspace, project_name=project, experiment=experiment_name
-        )
-    except Exception:
-        return None
-
-
-def get_experiment_status(experiment_state):
-    experiment = get_experiment(experiment_state)
-    if experiment is not None:
-        name = experiment.name
-        return experiment_state, f"Currently logging to: {name}"
-
-    return experiment_state, "No Experiments found"
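These helpers keep only a plain dict of identifiers in the app state and re-fetch the APIExperiment on demand, so the state stays serializable between UI callbacks. A minimal driver sketch using only the functions defined above (the key, workspace, and project values are placeholders, and it assumes you run it next to comet.py):

from comet import start_experiment, get_experiment_status

state, message = start_experiment(
    comet_api_key="YOUR_API_KEY",        # placeholder, not a real key
    comet_workspace="your-workspace",    # placeholder
    comet_project_name="txt2im-models",
    comet_experiment_name=None,          # falsy -> create a new experiment
    experiment=None,
)
print(message)                           # "Started <name>. Happy logging!😊"

state, status = get_experiment_status(state)
print(status)                            # "Currently logging to: <name>"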