parquet-converter committed on
Commit
5577e38
·
1 Parent(s): 28c3eda

Update parquet files (step 75 of 249)
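For context, each step of this conversion rewrites a batch of the repository's data files into Apache Parquet. A minimal sketch of what one such step might look like, assuming pandas with pyarrow installed; the file names are hypothetical:

```python
import pandas as pd

# Hypothetical single-file conversion, as a dataset-conversion bot
# might perform for each data file in the repository.
df = pd.read_csv("data/train.csv")  # original data file (hypothetical name)
df.to_parquet("data/train.parquet", index=False)  # Parquet replacement
```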

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Guitar Rig 6 Full Version [VERIFIED].md +0 -168
  2. spaces/1gistliPinn/ChatGPT4/Examples/893u2is User Manual.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Age Of Empires 3 No Cd Crack Gamecopyworld Gtahttps Scoutmails.com Index301.php K Age Of Empires 3 WORK.md +0 -129
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download Automation Studio 5.6 Crack Freel !!TOP!!.md +0 -24
  5. spaces/1line/AutoGPT/autogpt/processing/__init__.py +0 -0
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Acrobat Reader X The Power of PDF Productivity.md +0 -167
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street APK for PC How to Play the Stunning Racing Game on Your Laptop or Desktop.md +0 -131
  8. spaces/1phancelerku/anime-remove-background/CapCut Edit Videos like a Pro with TikToks Official Video Editor and Video Maker - Free Download.md +0 -93
  9. spaces/2023Liu2023/bingo/Dockerfile +0 -36
  10. spaces/2023Liu2023/bingo/src/app/page.tsx +0 -15
  11. spaces/2023Liu2023/bingo/src/components/chat.tsx +0 -93
  12. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/transform.py +0 -45
  13. spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_adv.py +0 -374
  14. spaces/AIGText/GlyphControl/ldm/models/diffusion/__init__.py +0 -0
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilevit-small_4xb32_2000e_3c_noF/mobilevit-small_4xb32_2000e_3c_noF.py +0 -137
  16. spaces/AgProfile/GradioGenOpenAi/README.md +0 -12
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateButtons.js +0 -18
  18. spaces/Akmyradov/TurkmenTTSweSTT/uroman/bin/uroman-tsv.sh +0 -28
  19. spaces/AlexZou/Deploy_Restoration/net/utils.py +0 -86
  20. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/misc.py +0 -262
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/img2img.md +0 -100
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion.py +0 -959
  23. spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py +0 -10
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py +0 -27
  25. spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py +0 -10
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv_custom/checkpoint.py +0 -500
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/__init__.py +0 -4
  28. spaces/Anonymous-sub/Rerender/gmflow_module/utils/utils.py +0 -58
  29. spaces/Anuj-Panthri/imdb_review_sentiment/app.py +0 -20
  30. spaces/Arsenii2023/Demo1/README.md +0 -12
  31. spaces/Artples/LLaMA-2-CHAT/README.md +0 -13
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/__main__.py +0 -31
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_modeling.py +0 -419
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/__init__.py +0 -1
  35. spaces/Bart92/RVC_HF/infer/modules/train/extract/extract_f0_rmvpe.py +0 -141
  36. spaces/Benson/text-generation/Examples/Descarga De La Red M.hollywoodbets.net.md +0 -104
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/zoneinfo/rebuild.py +0 -75
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py +0 -352
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/cookies.py +0 -561
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/ans_punct.py +0 -105
  41. spaces/CVPR/LIVE/thrust/thrust/detail/config/config.h +0 -39
  42. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/binary_search.h +0 -23
  43. spaces/CVPR/WALT/mmdet/core/evaluation/bbox_overlaps.py +0 -48
  44. spaces/CVPR/WALT/mmdet/models/losses/smooth_l1_loss.py +0 -139
  45. spaces/CVPR/lama-example/saicinpainting/training/modules/__init__.py +0 -31
  46. spaces/CVPR/regionclip-demo/detectron2/evaluation/cityscapes_evaluation.py +0 -194
  47. spaces/CognitiveLabs/Research-Assistant/README.md +0 -78
  48. spaces/Cropinky/hana_hanak_houses/networks_fastgan.py +0 -179
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/McIdasImagePlugin.py +0 -75
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_termui_impl.py +0 -739
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Guitar Rig 6 Full Version [VERIFIED].md DELETED
@@ -1,168 +0,0 @@
1
- <br />
2
- <h1>How to Download Guitar Rig 6 Full Version</h1>
3
- <p>If you are looking for a way to create realistic and professional guitar tones on your computer, you might have heard of <strong>Guitar Rig 6</strong>, the latest version of the popular amp simulator and multi-effects rack from Native Instruments. But how can you download Guitar Rig 6 full version for free or at a discounted price? And how can you install and use it to get the most out of your guitar playing and recording?</p>
4
- <h2>download guitar rig 6 full version</h2><br /><p><b><b>DOWNLOAD</b> >> <a href="https://byltly.com/2uKvLD">https://byltly.com/2uKvLD</a></b></p><br /><br />
5
- <p>In this article, we will answer all these questions and more. We will explain what Guitar Rig 6 is and why you need it, what features and benefits it offers, what system requirements and compatibility it has, how to download it for free or at a low cost, how to install and activate it on your computer, and how to use it to create amazing guitar tones. By the end of this article, you will have everything you need to know about downloading Guitar Rig 6 full version and using it to enhance your guitar sound.</p>
6
- <h2>What is Guitar Rig 6 and why you need it</h2>
7
- <p><strong>Guitar Rig 6</strong> is a software program that simulates the sound of various guitar amps, cabinets, pedals, effects, and tools. It allows you to plug your guitar into your computer and process your signal with a wide range of components that emulate real hardware devices. You can also use it as a standalone application or as a plugin in your digital audio workstation (DAW).</p>
8
- <p>Guitar Rig 6 is designed for guitarists of all levels and styles, from beginners to professionals, from rock to metal, from blues to jazz. Whether you want to practice, record, perform, or experiment with different sounds, Guitar Rig 6 can help you achieve your goals. You can use it to create realistic and authentic tones that match your favorite artists and genres, or you can use it to craft your own unique sounds that express your personality and creativity.</p>
9
- <h3>Guitar Rig 6 features and benefits</h3>
10
- <p>Guitar Rig 6 comes with a host of features and benefits that make it one of the best guitar effects software on the market. Here are some of them:</p>
11
- <p></p>
12
- <ul>
13
- <li><strong>Drag and drop interface</strong>: Guitar Rig 6 has a simple and intuitive interface that lets you easily build your own custom rigs by dragging and dropping components to the rack. You can also adjust the settings of each component with knobs, sliders, switches, and menus.</li>
14
- <li><strong>21 amp models with matching cabinets</strong>: Guitar Rig 6 offers a wide selection of amp models that cover various eras, styles, and sounds. From vintage classics to modern high-gain monsters, from clean and warm to crunchy and distorted, from British to American, you can find an amp that suits your taste. Each amp also comes with a matching cabinet that complements its tone.</li>
15
- <li><strong>68 effects models, tools, and modifiers</strong>: Guitar Rig 6 also provides a huge collection of effects models that emulate popular pedals, rack units, studio processors, and more. You can add effects such as distortion, overdrive, fuzz, compression, modulation, delay, reverb, pitch shifting, filtering, EQ, noise gate, looper, tuner, metronome, etc. You can also use tools such as splitters, mixers, crossovers, and modifiers to shape and control your signal flow and dynamics.</li>
16
- <li><strong>New amps and effects powered by Intelligent Circuit Modeling</strong>: Guitar Rig 6 introduces a new technology called <strong>Intelligent Circuit Modeling</strong> that uses artificial intelligence to analyze and recreate the behavior of real analog circuits. This results in more realistic and responsive sounds that capture the nuances and character of the original hardware. Guitar Rig 6 features three new amps and 16 new effects based on this technology, such as the Chicago, Bass Invader, Fire Breather, Harmonic Synthesizer, Grain Delay, Choral Reef, etc.</li>
17
- <li><strong>Over 300 presets and styles</strong>: Guitar Rig 6 also comes with over 300 presets that are ready to use or tweak to your liking. These presets are organized by styles, such as rock, metal, blues, jazz, pop, etc., and by artists, such as Jimi Hendrix, Slash, John Frusciante, David Gilmour, etc. You can also create your own presets and save them for later use.</li>
18
- <li><strong>Flexible routing and sidechaining</strong>: Guitar Rig 6 allows you to route your signal in various ways to create complex and creative sounds. You can use up to eight parallel racks to process different parts of your signal separately, or you can use sidechaining to modulate one component with another. For example, you can use a compressor to duck the volume of your guitar when you sing into a microphone, or you can use an envelope follower to control the filter cutoff of a synth with your guitar.</li>
19
- <li><strong>MIDI control and automation</strong>: Guitar Rig 6 also supports MIDI control and automation, which means you can use external MIDI devices such as footswitches, pedals, keyboards, controllers, etc., to control the parameters of Guitar Rig 6 in real time. You can also record and edit automation data in your DAW to automate changes in your sound over time.</li>
20
- <li><strong>Integration with other Native Instruments products</strong>: Guitar Rig 6 is compatible with other Native Instruments products, such as Komplete Kontrol keyboards, Maschine grooveboxes, Traktor DJ software, etc. You can use these products to access and control Guitar Rig 6 features and functions more easily and intuitively. You can also use Guitar Rig 6 as an effect plugin for other Native Instruments instruments and sounds.</li>
21
- </ul>
22
- <h3>Guitar Rig 6 system requirements and compatibility</h3>
23
- <p>Guitar Rig 6 is compatible with Windows and Mac operating systems. Here are the minimum system requirements for running Guitar Rig 6:</p>
24
- <table>
25
- <tr>
26
- <th>Operating System</th>
27
- <th>Windows 10 (64-bit)</th>
28
- <th>macOS 10.14 or higher</th>
29
- </tr>
30
- <tr>
31
- <td>CPU</td>
32
- <td>Intel Core i5 or equivalent AMD processor</td>
33
- <td>Intel Core i5 or equivalent Apple processor</td>
34
- </tr>
35
- <tr>
36
- <td>RAM</td>
37
- <td>4 GB (8 GB recommended)</td>
38
- <td>4 GB (8 GB recommended)</td>
39
- </tr>
40
- <tr>
41
- <td>Disk Space</td>
42
- <td>1 GB for Guitar Rig 6 Player<br>3 GB for Guitar Rig 6 Pro</td>
43
- <td>1 GB for Guitar Rig 6 Player<br>3 GB for Guitar Rig 6 Pro</td>
44
- </tr>
45
- <tr>
46
- <td>Graphics Card</td>
47
- <td>NVIDIA GeForce GTX 600 series or higher<br>AMD Radeon HD 7000 series or higher<br>Intel HD Graphics 4000 or higher</td>
48
- <td>NVIDIA GeForce GTX 600 series or higher<br>AMD Radeon HD 7000 series or higher<br>Intel HD Graphics 4000 or higher</td>
49
- </tr>
50
- <tr>
51
- <td>Audio Interface</td>
52
- <td>A dedicated audio interface with ASIO driver support is recommended for optimal performance and low latency.</td>
53
- <td>A dedicated audio interface with Core Audio driver support is recommended for optimal performance and low latency.</td>
54
- </tr>
55
- <tr>
56
- <td>MIDI Device</td>
57
- <td>A MIDI device such as a footswitch, pedal, keyboard, controller, etc., is optional but recommended for controlling Guitar Rig 6 parameters in real time.</td>
58
- <td>A MIDI device such as a footswitch, pedal, keyboard, controller, etc., is optional but recommended for controlling Guitar Rig 6 parameters in real time.</td>
59
- </tr> </table> <h2>How to download Guitar Rig 6 full version for free</h2>
60
- <p>Now that you know what Guitar Rig 6 is and what it can do for you, you might be wondering how to download it for free or at a low cost. There are three ways to get Guitar Rig 6 full version for free or at a discounted price:</p>
61
- <h3>Guitar Rig 6 Player: the free version with limited features</h3>
62
- <p>The first way to get Guitar Rig 6 full version for free is to download <strong>Guitar Rig 6 Player</strong>, the free version of Guitar Rig 6 that comes with limited features. Guitar Rig 6 Player is a great way to try out Guitar Rig 6 and see if you like it before buying the full version. Guitar Rig 6 Player includes:</p>
63
- <ul>
64
- <li>One amp model: Jump, based on the Marshall JMP Plexi</li>
65
- <li>One cabinet model: Matched Cabinet, based on the Marshall 1960A</li>
66
- <li>13 effects models: Skreamer, Demon Distortion, Cat, Mezcal, Citrus, AC Box, Gratifier, Treble Booster, Vintage Reverb, Quad Delay, Stereo Tune, Limiter, and Noise Reduction</li>
67
- <li>50 presets and styles</li>
68
- <li>The ability to use Guitar Rig 6 Player as a standalone application or as a plugin in your DAW</li>
69
- </ul>
70
- <p>To download Guitar Rig 6 Player for free, you need to create a free Native Instruments account and download the Native Access app. Native Access is a software that manages the installation and activation of Native Instruments products. Once you have Native Access installed, you can download Guitar Rig 6 Player from the Not Installed tab and install it on your computer.</p>
71
- <h3>Guitar Rig 6 Demo: the trial version with full features</h3>
72
- <p>The second way to get Guitar Rig 6 full version for free is to download <strong>Guitar Rig 6 Demo</strong>, the trial version of Guitar Rig 6 that comes with full features. Guitar Rig 6 Demo is a great way to test all the features and functions of Guitar Rig 6 and see if it meets your needs and expectations before buying the full version. Guitar Rig 6 Demo includes:</p>
73
- <ul>
74
- <li>All the features and benefits of Guitar Rig 6 Pro (see below)</li>
75
- <li>The ability to use Guitar Rig 6 Demo as a standalone application or as a plugin in your DAW</li>
76
- <li>A time limit of 30 minutes per session</li>
77
- <li>A noise burst every few minutes</li>
78
- <li>No saving or exporting of presets or sounds</li>
79
- </ul>
80
- <p>To download Guitar Rig 6 Demo for free, you need to create a free Native Instruments account and download the Native Access app. Once you have Native Access installed, you can download Guitar Rig 6 Demo from the Not Installed tab and install it on your computer.</p>
81
- <h3>Guitar Rig 6 Pro: the paid version with all features</h3>
82
- <p>The third way to get Guitar Rig 6 full version is to buy <strong>Guitar Rig 6 Pro</strong>, the paid version of Guitar Rig 6 that comes with all features. Guitar Rig 6 Pro is the ultimate guitar effects software that gives you unlimited creative possibilities and professional results. Guitar Rig 6 Pro includes:</p>
83
- <ul>
84
- <li>All the features and benefits of Guitar Rig 6 (see above)</li>
85
- <li>No time limit or noise burst</li>
86
- <li>The ability to save and export presets and sounds</li>
87
- <li>The ability to use Guitar Rig 6 Pro as a standalone application or as a plugin in your DAW</li>
88
- <li>Free updates and support from Native Instruments</li>
89
- </ul>
90
- <p>To buy Guitar Rig 6 Pro, you need to create a free Native Instruments account and download the Native Access app. Once you have Native Access installed, you can buy Guitar Rig 6 Pro from the Shop tab and install it on your computer. The price of Guitar Rig 6 Pro is $199 USD. However, there are some ways to get it at a discounted price:</p>
91
- <ul>
92
- <li>If you already own a previous version of Guitar Rig (Guitar Rig 1-5), you can upgrade to Guitar Rig 6 Pro for $99 USD.</li>
93
- <li>If you already own Komplete Start (a free collection of instruments and sounds from Native Instruments), you can crossgrade to Guitar Rig 6 Pro for $149 USD.</li>
94
- <li>If you already own Komplete Select (a curated collection of instruments and sounds from Native Instruments), you can crossgrade to Guitar Rig 6 Pro for $99 USD.</li>
95
- <li>If you already own Komplete (the ultimate production suite from Native Instruments), you can get Guitar Rig 6 Pro for free as part of your bundle.</li>
96
- </ul> <h2>How to install and activate Guitar Rig 6 full version</h2>
97
- <p>Once you have downloaded Guitar Rig 6 full version, either for free or for a price, you need to install and activate it on your computer. Here are the steps to do so:</p>
98
- <h3>How to install Guitar Rig 6 on your computer</h3>
99
- <p>To install Guitar Rig 6 on your computer, you need to use the Native Access app that you downloaded earlier. Here are the steps to install Guitar Rig 6 with Native Access:</p>
100
- <ol>
101
- <li>Open Native Access and log in with your Native Instruments account.</li>
102
- <li>Go to the Installed Products tab and find Guitar Rig 6 in the list.</li>
103
- <li>Click on the Install button and choose a location for the installation.</li>
104
- <li>Wait for the installation to complete and click on the Finish button.</li>
105
- <li>Guitar Rig 6 is now installed on your computer and ready to use.</li>
106
- </ol>
107
- <h3>How to activate Guitar Rig 6 with your license key or Native Access account</h3>
108
- <p>To activate Guitar Rig 6 on your computer, you need to use either your license key or your Native Access account. Here are the steps to activate Guitar Rig 6 with either method:</p>
109
- <ul>
110
- <li>If you bought Guitar Rig 6 Pro or upgraded from a previous version, you should have received a license key via email. To activate Guitar Rig 6 with your license key, follow these steps: <ol>
111
- <li>Open Native Access and log in with your Native Instruments account.</li>
112
- <li>Go to the Add a serial tab and enter your license key in the field.</li>
113
- <li>Click on the Add serial button and wait for the activation to complete.</li>
114
- <li>Guitar Rig 6 is now activated on your computer and ready to use.</li>
115
- </ol>
116
- </li>
117
- <li>If you downloaded Guitar Rig 6 Player or Guitar Rig 6 Demo, you don't need a license key. To activate Guitar Rig 6 with your Native Access account, follow these steps: <ol>
118
- <li>Open Native Access and log in with your Native Instruments account.</li>
119
- <li>Go to the Installed Products tab and find Guitar Rig 6 in the list.</li>
120
- <li>Click on the Activate button and wait for the activation to complete.</li>
121
- <li>Guitar Rig 6 is now activated on your computer and ready to use.</li>
122
- </ol>
123
- </li>
124
- </ul>
125
- <h2>How to use Guitar Rig 6 full version to create amazing guitar tones</h2>
126
- <p>Now that you have installed and activated Guitar Rig 6 full version on your computer, you can start using it to create amazing guitar tones. Here are some tips and tricks on how to use Guitar Rig 6 full version effectively and efficiently:</p>
127
- <h3>How to navigate the Guitar Rig 6 interface and browser</h3>
128
- <p>Guitar Rig 6 has a user-friendly interface that consists of four main sections: the header, the browser, the rack, and the footer. Here is a brief overview of each section:</p>
129
- <ul>
130
- <li>The header contains the menu bar, the toolbar, and the preset name. You can use the menu bar to access various options and settings, such as file, edit, view, help, etc. You can use the toolbar to access various functions and tools, such as tuner, metronome, tape deck, etc. You can also see and change the name of the current preset in the header.</li>
131
- <li>The browser contains the preset list, the style list, and the component list. You can use the preset list to browse, load, save, delete, or search presets. You can use the style list to filter presets by styles, such as rock, metal, blues, jazz, etc. You can use the component list to browse, load, or search components, such as amps, cabinets, effects, tools, etc.</li>
132
- <li>The rack contains the components that make up your guitar rig. You can see and adjust the settings of each component with knobs, sliders, switches, and menus. You can also drag and drop components to add, remove, or rearrange them in the rack. You can also use splitters, mixers, crossovers and modifiers to shape and control your signal flow and dynamics.</li>
133
- <li>The footer contains the master volume, the input level, the output level, and the CPU usage. You can use the master volume to adjust the overall volume of your guitar rig. You can also see and adjust the input level and the output level of your guitar rig. You can also see the CPU usage of your computer and optimize it if needed.</li>
134
- </ul>
135
- <p>To navigate the Guitar Rig 6 interface and browser, you can use your mouse, keyboard, or MIDI device. You can also use shortcuts and commands to access various functions and tools more quickly. For example, you can use the arrow keys to navigate the preset list, the style list, and the component list. You can also use the spacebar to bypass or enable a component, or use the delete key to remove a component from the rack. You can also use commands such as Ctrl+C to copy a component, Ctrl+V to paste a component, Ctrl+Z to undo an action, etc.</p>
136
- <h3>How to load and customize presets and components</h3>
137
- <p>Guitar Rig 6 comes with over 300 presets that are ready to use or tweak to your liking. You can also create your own presets and save them for later use. Here are some tips on how to load and customize presets and components:</p>
138
- <ul>
139
- <li>To load a preset, you can use the browser to find and select a preset from the preset list or the style list. You can also use the arrow keys or the mouse wheel to scroll through the presets. You can also use the search function to find a preset by name or keyword. Once you have found a preset that you like, you can double-click on it or press Enter to load it to the rack.</li>
140
- <li>To customize a preset, you can use the rack to adjust the settings of each component with knobs, sliders, switches, and menus. You can also drag and drop components to add, remove, or rearrange them in the rack. You can also use splitters, mixers, crossovers, and modifiers to shape and control your signal flow and dynamics. You can also use MIDI control and automation to control the parameters of Guitar Rig 6 in real time.</li>
141
- <li>To save a preset, you can click on the Save button in the header and enter a name for your preset. You can also choose a style for your preset from the style list. You can also add tags and comments to your preset for easier identification and organization. Once you have saved your preset, you can find it in the User folder in the browser.</li>
142
- <li>To load a component, you can use the browser to find and select a component from the component list. You can also use the arrow keys or the mouse wheel to scroll through the components. You can also use the search function to find a component by name or keyword. Once you have found a component that you like, you can drag and drop it to an empty slot in the rack or on top of an existing component to replace it.</li>
143
- <li>To customize a component, you can use the rack to adjust the settings of each component with knobs, sliders, switches, and menus. You can also drag and drop components to add, remove, or rearrange them in the rack. You can also use splitters, mixers, crossovers and modifiers to shape and control your signal flow and dynamics. You can also use MIDI control and automation to control the parameters of Guitar Rig 6 in real time.</li>
144
- </ul>
145
- <h3>How to use the new amps and effects powered by Intelligent Circuit Modeling</h3>
146
- <p>Guitar Rig 6 introduces a new technology called <strong>Intelligent Circuit Modeling</strong> that uses artificial intelligence to analyze and recreate the behavior of real analog circuits. This results in more realistic and responsive sounds that capture the nuances and character of the original hardware. Guitar Rig 6 features three new amps and 16 new effects based on this technology, such as the Chicago, Bass Invader, Fire Breather, Harmonic Synthesizer, Grain Delay, Choral Reef, etc. Here are some tips on how to use the new amps and effects powered by Intelligent Circuit Modeling:</p>
147
- <ul>
148
- <li>To load a new amp or effect, you can use the browser to find and select a component from the component list. You can also use the arrow keys or the mouse wheel to scroll through the components. You can also use the search function to find a component by name or keyword. Once you have found a component that you like, you can drag and drop it to an empty slot in the rack or on top of an existing component to replace it.</li>
149
- <li>To customize a new amp or effect, you can use the rack to adjust the settings of each component with knobs, sliders, switches, and menus. You can also drag and drop components to add, remove, or rearrange them in the rack. You can also use splitters, mixers, crossovers, and modifiers to shape and control your signal flow and dynamics. You can also use MIDI control and automation to control the parameters of Guitar Rig 6 in real time.</li>
150
- <li>To get the best sound from a new amp or effect, you need to match it with a suitable cabinet or speaker. You can use the browser to find and select a cabinet or speaker from the component list. You can also use the arrow keys or the mouse wheel to scroll through the components. You can also use the search function to find a component by name or keyword. Once you have found a component that you like, you can drag and drop it to an empty slot in the rack or on top of an existing component to replace it.</li>
151
- <li>To experiment with different sounds from a new amp or effect, you can use the presets that come with each component. You can use the browser to find and select a preset from the preset list or the style list. You can also use the arrow keys or the mouse wheel to scroll through the presets. You can also use the search function to find a preset by name or keyword. Once you have found a preset that you like, you can double-click on it or press Enter to load it to the rack.</li>
152
- </ul>
153
- <h2>Conclusion and FAQs</h2>
154
- <p>Guitar Rig 6 is a powerful and versatile guitar effects software that can help you create realistic and professional guitar tones on your computer. It offers a wide range of features and benefits that make it one of the best guitar effects software on the market. It also comes with three ways to get Guitar Rig 6 full version for free or at a discounted price: Guitar Rig 6 Player, Guitar Rig 6 Demo, and Guitar Rig 6 Pro.</p>
155
- <p>In this article, we have explained what Guitar Rig 6 is and why you need it, what features and benefits it offers, what system requirements and compatibility it has, how to download it for free or at a low cost, how to install and activate it on your computer, and how to use it to create amazing guitar tones. We hope that this article has helped you learn everything you need to know about downloading Guitar Rig 6 full version and using it to enhance your guitar sound.</p>
156
- <p>If you have any questions or doubts about Guitar Rig 6 full version, here are some frequently asked questions (FAQs) that might help you:</p>
157
- <h4>Q: Can I use Guitar Rig 6 with any guitar?</h4>
158
- <p>A: Yes, you can use Guitar Rig 6 with any electric guitar, acoustic guitar, bass guitar, or any other instrument that has a pickup or a microphone. You just need to connect your instrument to your computer via an audio interface with an instrument input.</p>
159
- <h4>Q: Can I use Guitar Rig 6 with any DAW?</h4>
160
- <p>A: Yes, you can use Guitar Rig 6 with any DAW that supports VST, AU, or AAX plugin formats. You just need to load Guitar Rig 6 as an effect plugin in your DAW's track or bus.</p>
161
- <h4>Q: Can I use Guitar Rig 6 offline?</h4>
162
- <p>A: Yes, you can use Guitar Rig 6 offline as a standalone application without an internet connection. However, you need an internet connection to download, install, and activate Guitar Rig 6 for the first time. You also need an internet connection to access the online features and updates of Guitar Rig 6.</p>
163
- <h4>Q: Can I use Guitar Rig 6 with other guitar effects software or hardware?</h4>
164
- <p>A: Yes, you can use Guitar Rig 6 with other guitar effects software or hardware, as long as they are compatible and do not cause any conflicts or issues. You can use Guitar Rig 6 as an effect plugin in your DAW and combine it with other plugins, or you can use Guitar Rig 6 as a standalone application and route it to other software or hardware via an audio interface or a virtual cable.</p>
165
- <h4>Q: Can I share my Guitar Rig 6 presets and sounds with others?</h4>
166
- <p>A: Yes, you can share your Guitar Rig 6 presets and sounds with others, as long as you respect the intellectual property rights of Native Instruments and the original creators of the components and presets. You can export your presets and sounds as files and send them to others via email, social media, cloud storage, etc. You can also import presets and sounds from others and load them into your Guitar Rig 6.</p>
spaces/1gistliPinn/ChatGPT4/Examples/893u2is User Manual.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>893u2is User Manual</h2><br /><p><b><b>Download</b> ===== <a href="https://imgfil.com/2uy100">https://imgfil.com/2uy100</a></b></p><br /><br />
2
- <br />
3
- Oct 8, 2015 Only after I read the instructions carefully did I see the ... Station Users Guide Multi-Function Hdd Docking Manual 893u2is ...
spaces/1gistliPinn/ChatGPT4/Examples/Age Of Empires 3 No Cd Crack Gamecopyworld Gtahttps Scoutmails.com Index301.php K Age Of Empires 3 WORK.md DELETED
@@ -1,129 +0,0 @@
1
-
2
- <h1>Age of Empires 3 No CD Crack GameCopyWorld: How to Play the Classic Strategy Game Without a Disc</h1>
3
-
4
- <p>Age of Empires 3 is one of the most popular and acclaimed strategy games of all time, but it also requires a CD to play. If you have lost your CD, or you want to play the game on a different computer without carrying the disc around, you might be looking for a way to play Age of Empires 3 no CD crack GameCopyWorld.</p>
5
-
6
- <p>GameCopyWorld is a website that provides game fixes, trainers, cheats, and patches for various PC games. One of the game fixes they offer is a no CD crack for Age of Empires 3, which allows you to play the game without inserting the CD every time. This can also help you avoid potential damage to your CD or CD drive.</p>
7
- <h2>age of empires 3 no cd crack gamecopyworld gtahttps: scoutmails.com index301.php k age of empires 3</h2><br /><p><b><b>Download File</b> &#10037;&#10037;&#10037; <a href="https://imgfil.com/2uxZHS">https://imgfil.com/2uxZHS</a></b></p><br /><br />
8
-
9
- <p>In this article, we will show you how to download and install Age of Empires 3 no CD crack GameCopyWorld, and how to enjoy the game without any hassle. We will also tell you about some of the features and benefits of playing Age of Empires 3 no CD crack GameCopyWorld.</p>
10
-
11
- <h2>How to Download and Install Age of Empires 3 No CD Crack GameCopyWorld</h2>
12
-
13
- <p>To download and install Age of Empires 3 no CD crack GameCopyWorld, you will need to follow these steps:</p>
14
-
15
- <ol>
16
- <li>Go to <a href="https://www.gamecopyworld.com/games/pc_age_of_empires_3.shtml">https://www.gamecopyworld.com/games/pc_age_of_empires_3.shtml</a> and scroll down to find the game fix you need. Depending on which version and expansion of Age of Empires 3 you have, you will need to choose the appropriate no CD crack. For example, if you have Age of Empires 3: Complete Collection, which includes the base game and both expansions (The WarChiefs and The Asian Dynasties), you will need to download Age of Empires III: Complete Collection v1.0 [EN] Fixed Files.</li>
17
- <li>Click on the download link and save the file to your computer. You may need to use a program like WinRAR or 7-Zip to extract the file.</li>
18
- <li>Locate the folder where you have installed Age of Empires 3 on your computer. It is usually in C:\Program Files (x86)\Microsoft Games\Age of Empires III.</li>
19
- <li>Copy the cracked files from the downloaded folder and paste them into the installation folder, replacing the original files. You may need to backup the original files in case you want to restore them later.</li>
20
- <li>Run the game as usual. You should be able to play Age of Empires 3 without inserting the CD.</li>
21
- </ol>
22
-
23
- <h2>Features and Benefits of Playing Age of Empires 3 No CD Crack GameCopyWorld</h2>
24
-
25
- <p>Playing Age of Empires 3 no CD crack GameCopyWorld has some advantages over playing with the CD. Here are some of them:</p>
26
-
27
- <ul>
28
- <li>You can play the game on any computer without carrying the CD around.</li>
29
- <li>You can avoid potential damage to your CD or CD drive from scratches or wear and tear.</li>
30
- <li>You can save some disk space by deleting the ISO image of the CD if you have one.</li>
31
- <li>You can enjoy faster loading times and smoother performance by playing from your hard drive instead of your CD drive.</li>
32
- <li>You can still access all the features and content of the game, including multiplayer mode, online updates, and mods.</li>
33
- </ul>
34
-
35
- <h2>Conclusion</h2>
36
-
37
- <p>Age of Empires 3 is a classic strategy game that deserves to be played by anyone who loves history, culture, and warfare. With Age of Empires 3 no CD crack GameCopyWorld, you can play the game without any hassle or limitation. Just follow our guide on how to download and install Age of Empires 3 no CD crack GameCopyWorld, and enjoy the game at its best.</p>
38
-
39
- <p>If you liked this article, please share it with your friends who are also fans of Age of Empires 3. And if you have any questions or comments, feel free to leave them below. We would love to hear from you!</p>
40
- <h2>What is Age of Empires 3 and Why Should You Play It?</h2>
41
-
42
- <p>Age of Empires 3 is a real-time strategy game that was released in 2005 by Microsoft Studios and Ensemble Studios. It is the third installment in the Age of Empires series, which is one of the most successful and influential strategy game franchises of all time.</p>
43
-
44
- <p>Age of Empires 3 takes place during the Age of Discovery, from the 15th to the 19th century. You can choose from eight European civilizations, each with their own unique units, buildings, technologies, and abilities. You can also play as three native American civilizations in the WarChiefs expansion, or as three Asian civilizations in the Asian Dynasties expansion.</p>
45
-
46
- <p>Age of Empires 3 offers a rich and varied gameplay experience that will appeal to both casual and hardcore strategy fans. You can explore and colonize new lands, trade and fight with other players or AI opponents, build and manage your economy and military, research new technologies and upgrades, and customize your home city that provides you with bonuses and shipments.</p>
47
- <p></p>
48
-
49
- <p>Age of Empires 3 also features a compelling campaign mode that follows the story of three generations of the Black family, as they participate in historical events such as the Seven Years' War, the American Revolution, and the Napoleonic Wars. The campaign mode has cinematic cutscenes, voice acting, and scripted scenarios that will immerse you in the history and culture of the era.</p>
50
-
51
- <p>Age of Empires 3 is a classic strategy game that deserves to be played by anyone who loves history, culture, and warfare. It has stunning graphics, sound effects, and music that bring the game world to life. It has a large and active online community that supports the game with mods, maps, tournaments, and more. It has a high replay value, as you can try different strategies, civilizations, game modes, and difficulty levels.</p>
52
-
53
- <h2>How to Play Age of Empires 3 No CD Crack GameCopyWorld Online</h2>
54
-
55
- <p>One of the best features of Age of Empires 3 is its online multiplayer mode, where you can challenge other players from around the world in various game modes such as supremacy, deathmatch, treaty, king of the hill, and more. You can also join or create clans, chat with other players, check your stats and rankings, and earn medals and achievements.</p>
56
-
57
- <p>However, to play Age of Empires 3 online, you need to have a valid CD key that is registered on your Microsoft account. If you have lost your CD key, or you have downloaded Age of Empires 3 no CD crack GameCopyWorld from our website, you might not be able to access the official online servers.</p>
58
-
59
- <p>But don't worry, there is a way to play Age of Empires 3 no CD crack GameCopyWorld online without a CD key. All you need to do is download and install a third-party client called ESOCommunity Patch. This patch will allow you to play Age of Empires 3 no CD crack GameCopyWorld online on ESOCommunity servers, which are unofficial but popular servers that host thousands of players every day.</p>
60
-
61
- <p>To download and install ESOCommunity Patch for Age of Empires 3 no CD crack GameCopyWorld, you will need to follow these steps:</p>
62
-
63
- <ol>
64
- <li>Go to <a href="https://eso-community.net/download-patch">https://eso-community.net/download-patch</a> and click on the download button.</li>
65
- <li>Run the installer and follow the instructions. Make sure you select your Age of Empires 3 installation folder when prompted.</li>
66
- <li>Launch Age of Empires 3 no CD crack GameCopyWorld from your desktop shortcut or start menu.</li>
67
- <li>Create a new ESO account or log in with your existing one. You don't need a CD key to create an account.</li>
68
- <li>Enjoy playing Age of Empires 3 no CD crack GameCopyWorld online on ESOCommunity servers!</li>
69
- </ol>
70
-
71
- <h2>Conclusion</h2>
72
-
73
- <p>Age of Empires 3 no CD crack GameCopyWorld is a great way to play the classic strategy game without a disc. You can download and install it easily from our website, and enjoy all the features and content of the game without any hassle. You can also play it online on ESOCommunity servers with other players who have downloaded Age of Empires 3 no CD crack GameCopyWorld.</p>
74
-
75
- <p>If you liked this article, please share it with your friends who are also fans of Age of Empires 3. And if you have any questions or comments, feel free to leave them below. We would love to hear from you!</p>
76
- <h2>How to Master the Combat System in Age of Empires 3</h2>
77
-
78
- <p>Age of Empires 3 is not just about building and managing your economy; it is also about fighting and conquering your enemies. The combat system in Age of Empires 3 is based on a rock-paper-scissors model, where each unit type has strengths and weaknesses against other unit types. For example, infantry units are good against cavalry units, cavalry units are good against artillery units, and artillery units are good against infantry units.</p>
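To make the counter cycle concrete, here is a toy sketch in Python (not game code; the unit categories are simplified from the paragraph above):

```python
# Toy model of the rock-paper-scissors counter cycle described above:
# infantry beats cavalry, cavalry beats artillery, artillery beats infantry.
COUNTERS = {
    "infantry": "cavalry",
    "cavalry": "artillery",
    "artillery": "infantry",
}

def has_advantage(attacker: str, defender: str) -> bool:
    """Return True if `attacker` counters `defender` in this toy model."""
    return COUNTERS[attacker] == defender

assert has_advantage("infantry", "cavalry")
assert not has_advantage("cavalry", "infantry")  # matchups are one-directional
```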
79
-
80
- <p>To master the combat system in Age of Empires 3, you need to know the different unit types and their counters, as well as how to use formations, stances, and special abilities. You also need to pay attention to the terrain, the weather, and the line of sight, as they can affect the performance and visibility of your units.</p>
81
-
82
- <p>Here are some general tips and tricks for combat in Age of Empires 3:</p>
83
-
84
- <ul>
85
- <li>Always scout your enemy's base and army composition before attacking. This will help you plan your strategy and choose the right units for the battle.</li>
86
- <li>Try to have a balanced army with a mix of unit types. This will allow you to adapt to different situations and counter different threats.</li>
87
- <li>Use formations to organize your army and give them bonuses. For example, the staggered formation gives your ranged units more firing arc, while the box formation protects your artillery units from cavalry charges.</li>
88
- <li>Use stances to control the behavior of your units. For example, the defensive stance makes your units hold their ground and focus on nearby enemies, while the aggressive stance makes your units chase and attack any enemy they see.</li>
89
- <li>Use special abilities to gain an edge in combat. For example, some infantry units can use bayonets or grenades to deal extra damage, while some cavalry units can use trample mode to run over enemy infantry.</li>
90
- <li>Use cover mode to reduce damage from ranged attacks. Cover mode makes your units kneel behind obstacles like trees or walls, but it also reduces their movement speed and firing rate.</li>
91
- <li>Use flanking maneuvers to surprise and outsmart your enemy. Flanking means attacking your enemy from the sides or behind, where they are more vulnerable and less prepared.</li>
92
- <li>Use hit-and-run tactics to harass and weaken your enemy. Hit-and-run tactics mean attacking your enemy with fast units like cavalry or skirmishers, then retreating before they can retaliate.</li>
93
- <li>Use siege weapons to destroy enemy buildings and defenses. Siege weapons like cannons or mortars can deal massive damage to buildings and walls, but they are slow and vulnerable to enemy fire.</li>
94
- <li>Use ships to support your land army or attack from the sea. Ships can transport units across water, bombard enemy positions from afar, or engage in naval battles with other ships.</li>
95
- </ul>
96
-
97
- <h2>How to Enjoy the Campaign Mode in Age of Empires 3</h2>
98
-
99
- <p>If you are looking for a more story-driven and cinematic experience in Age of Empires 3, you might want to try the campaign mode. The campaign mode consists of three acts that follow the adventures of the Black family through different historical periods and continents.</p>
100
-
101
- <p>The first act is called Blood, Ice, and Steel, and it takes place during the colonization of America in the 16th and 17th centuries. You will play as Morgan Black, a knight of Malta who fights against the Spanish conquistadors and their allies.</p>
102
-
103
- <p>The second act is called Fire and Shadow, and it takes place during the American Revolution in the 18th century. You will play as John Black, a mercenary who joins the Continental Army and battles against the British Empire.</p>
104
-
105
- <p>The third act is called Steel and Thunder, and it takes place during the Napoleonic Wars in the 19th century. You will play as Amelia Black, a railroad tycoon who travels across Europe and Asia in search of her lost family legacy.</p>
106
-
107
- <p>The campaign mode in Age of Empires 3 offers a rich and varied gameplay experience that will appeal to both casual and hardcore strategy fans. You will explore and colonize new lands, trade and fight with other factions, build and manage your economy and military, research new technologies and upgrades, and customize your home city that provides you with bonuses and shipments.</p>
108
-
109
- <p>The campaign mode also features cinematic cutscenes, voice acting, and scripted scenarios that will immerse you in the history and culture of the era. You will meet historical figures like George Washington, Napoleon Bonaparte, Simon Bolivar, Queen Isabella, Tokugawa Ieyasu, Akbar, Ivan the Terrible, Elizabeth I, Samuel de Champlain, Tecumseh, Nathaniel Black, Sahin "The Falcon", Kanyenke, Lizzie "The Pirate", Alain Magnan, Warwick "The Redcoat", Pierre Beaumont, Stuart Black, Nonahkee, Sven Kuechler, Huang He, Admiral Jinhai, Nanib Sahir, Rani Pravarthi, Colonel Edwardson, Chayton Black, Holme "The Boneguard", Crazy Horse, Chief Brave Wolf, General Custer, Major Cooper, Kichiro, Daimyo Mototada Torii, Daimyo Junkei Kuroda, Daimyo Shingen Takeda, Daimyo Kenshin Uesugi, Daimyo Nobunaga Oda, Daimyo Hideyoshi Toyotomi, and Daimyo Ieyasu Tokugawa.</p>
110
-
111
- <p>If you want to enjoy the campaign mode in Age of Empires 3, here are some tips and tricks:</p>
112
-
113
- <ul>
114
- <li>Play on a difficulty level that suits your skill level. The campaign mode has four difficulty levels: Easy, Moderate, Hard, and Expert. The higher the difficulty level, the more challenging the enemies will be.</li>
115
- <li>Watch the cutscenes and listen to the dialogue. They will provide you with important information about the story, characters, objectives, hints, tips, etc.</li>
116
- <li>Read the objectives carefully. They will tell you what you need to do to complete each mission. Some objectives are mandatory (marked with a star), while others are optional (marked with a circle).</li>
117
- <li>Check the map often. It will show you where you are, where your allies and enemies are, where your objectives are located, etc.</li>
118
- <li>Save your game frequently. You never know when something might go wrong or when you might want to try a different strategy.</li>
119
- <li>Have fun! The campaign mode is designed to be entertaining and engaging for all kinds of players. Don't worry too much about winning or losing; just enjoy the journey!</li>
120
- </ul>
121
- <h2>Conclusion</h2>
122
-
123
- <p>Age of Empires 3 no CD crack GameCopyWorld is a great way to play the classic strategy game without a disc. You can download and install it easily from our website, and enjoy all the features and content of the game without any hassle. You can also play it online on ESOCommunity servers with other players who have downloaded Age of Empires 3 no CD crack GameCopyWorld.</p>
124
-
125
- <p>In this article, we have shown you how to download and install Age of Empires 3 no CD crack GameCopyWorld, how to master the combat system in Age of Empires 3, and how to enjoy the campaign mode in Age of Empires 3. We hope you have found this article helpful and informative, and that you have learned some useful tips and tricks to boost your game.</p>
126
-
127
- <p>If you liked this article, please share it with your friends who are also fans of Age of Empires 3. And if you have any questions or comments, feel free to leave them below. We would love to hear from you!</p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Automation Studio 5.6 Crack Freel !!TOP!!.md DELETED
@@ -1,24 +0,0 @@
1
- <h2>Download Automation Studio 5.6 Crack Freel</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://imgfil.com/2uy1GW">https://imgfil.com/2uy1GW</a></b></p><br /><br />
2
- <br />
3
- Winsound.com Automation Studio; Read the Manuals and FAQs in the Digital Audio Forum; Learn More About Old Stock Author: Dan, JVC Author: Jack Szabo from Jack's JVC Revamp; Jack's JVC Revamp 5,…Category: Audio - Digital Audio - Components & Equipment Other Related Categories AudioSoftwareTuning & MeasurementsAudioCables & DevicesToolsMagazines & JournalsMembers ClubsOther Educational Sites Review Top Posts Analyze Audio at What Hi, I’m Dan. With a knowledge of some 35 years of audio, I have been writing about the companies, products, and technologies in this business since 1999. I am an Authorized JVC Dealer, and the Audio & Network Assistant Editor here at Home Theater Forum. View my complete profile
4
-
5
- Repair Shop Studios now offers a series of licensing programs that can enable you to generate a royalty stream for your independently developed projects, including the JVC AiS Software Suite, the JVC AiS Suite, and the JVC AiS Suite Plus.
6
-
7
- Thanks for the info!
8
-
9
- I can't find the manuals for this one either. Will just have to use the information above in this thread I guess. On the CD there are 2 files for the CD Writer, a program for the CD writer and another for the CD Writer Service.
10
-
11
- I have the new version 1.02 and have used the CD Writer 1.02 with software version AOS22 which says the disc I used was OSD 2.6 version. I have also used the CD Writer version 1.02 with software version BOS21 with no OSD disc. The CD Writer version 1.02 with AOS22 will not write on my ATR Vista.
12
-
13
- I did a google search and found this in an earlier post but can't find the post right now
14
-
15
- You are using CD writer 1.02 with AOS22, which is compatible with Vista x64. Your software version is not compatible. XP works fine as you are using the XP version of the program.
16
-
17
- Use a CD Writer version 1.2 software.
18
-
19
- You will need to look in your Cd writing software. I know it's not simple but you will find the version 2.6 in there. I had a similar problem with some software I bought and it took a little investigation to determine that it wasn't the CD writer software.
20
-
21
- I have the new version 1.02 and have used the CD Writer 1.
spaces/1line/AutoGPT/autogpt/processing/__init__.py DELETED
File without changes
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Acrobat Reader X The Power of PDF Productivity.md DELETED
@@ -1,167 +0,0 @@
1
-
2
- <h1>How to Download Adobe Reader X</h1>
3
- <p>Adobe Reader X is a free software that allows you to view, print, and comment on PDF files. PDF stands for Portable Document Format, a file format that preserves the layout, fonts, images, and hyperlinks of any document. PDF files are widely used for sharing information across different platforms and devices.</p>
4
- <h2>how to download adobe reader x</h2><br /><p><b><b>Download</b> &#9733; <a href="https://urlin.us/2uSWMb">https://urlin.us/2uSWMb</a></b></p><br /><br />
5
- <p>If you want to access PDF files on your computer or mobile device, you need Adobe Reader X. With this software, you can not only open and view PDFs, but also fill out forms, sign documents, add annotations, and more. Adobe Reader X also offers some advanced features, such as converting PDFs to other file formats, password protecting PDFs, comparing PDFs, and integrating with cloud storage services.</p>
6
- <p>In this article, we will show you how to download Adobe Reader X for Windows and Mac, as well as how to troubleshoot some common installation issues. Follow the steps below and enjoy the benefits of Adobe Reader X.</p>
7
- <h2>How to Download Adobe Reader X for Windows</h2>
8
- <p>If you are using a Windows computer, here are the steps to download and install Adobe Reader X:</p>
9
- <ol>
10
- <li><strong>Check your system requirements.</strong> Before you download Adobe Reader X, make sure that your computer meets the minimum system requirements. You can find them on <a href="">this page</a>. You will need a Windows operating system (Windows Server or Windows XP/Vista/7/8/10), an Intel or AMD processor, at least 256 MB of RAM, at least 260 MB of hard disk space, a screen resolution of at least 1024 x 576 pixels, and an Internet browser (Internet Explorer or Firefox).</li>
11
- <li><strong>Go to the official Adobe website.</strong> Open your Internet browser and go to <a href="">this page</a>. This is where you can download Acrobat Reader for free.</li>
12
- <li><strong>Choose your language and version.</strong> On the download page, you will see a drop-down menu where you can select your language. You can also choose whether you want to download Acrobat Reader for Windows (32-bit or 64-bit) or Mac OS. Make sure you select the correct version for your system.</li>
13
- <li><strong>Click the Download button.</strong> After choosing your language and version, click the yellow Download button. You will see a pop-up window asking you to save the file. Choose a location on your computer where you want to save the file and click Save.</li>
14
- <li><strong>Run the installer and follow the instructions.</strong> Once the download is complete, locate the file on your computer and double-click it to run the installer. You will see a welcome screen where you can choose whether you want to install Acrobat Reader as a default PDF viewer or not. Click Next and follow the on-screen instructions to complete the installation. You may need to restart your computer to finish the installation.</li>
15
- </ol>
16
- <p>Congratulations, you have successfully downloaded and installed Adobe Reader X for Windows. You can now open and view any PDF file on your computer with this software.</p>
17
- <p>How to download adobe reader x for windows 10<br />
18
- How to download adobe reader x offline installer<br />
19
- How to download adobe reader x free version<br />
20
- How to download adobe reader x for mac os<br />
21
- How to download adobe reader x update<br />
22
- How to download adobe reader x pro<br />
23
- How to download adobe reader x for android<br />
24
- How to download adobe reader x msi<br />
25
- How to download adobe reader x 10.1.16<br />
26
- How to download adobe reader x for chromebook<br />
27
- How to download adobe reader x for linux<br />
28
- How to download adobe reader x 64 bit<br />
29
- How to download adobe reader x portable<br />
30
- How to download adobe reader x full setup<br />
31
- How to download adobe reader x without internet<br />
32
- How to download adobe reader x for windows 7<br />
33
- How to download adobe reader x for ipad<br />
34
- How to download adobe reader x 10.0.0<br />
35
- How to download adobe reader x from official website<br />
36
- How to download adobe reader x with crack<br />
37
- How to download adobe reader x for windows 8.1<br />
38
- How to download adobe reader x for iphone<br />
39
- How to download adobe reader x 10.1.4<br />
40
- How to download adobe reader x in hindi<br />
41
- How to download adobe reader x for windows xp<br />
42
- How to download adobe reader x for kindle fire<br />
43
- How to download adobe reader x 10.1.1<br />
44
- How to download adobe reader x in tamil<br />
45
- How to download adobe reader x for windows vista<br />
46
- How to download adobe reader x for pc<br />
47
- How to download adobe reader x 10.0.1<br />
48
- How to download adobe reader x in urdu<br />
49
- How to download adobe reader x for macbook air<br />
50
- How to download adobe reader x for laptop<br />
51
- How to download adobe reader x 10.1.3<br />
52
- How to download adobe reader x in telugu<br />
53
- How to download adobe reader x for macbook pro<br />
54
- How to download adobe reader x for desktop<br />
55
- How to download adobe reader x 10.0.2<br />
56
- How to download adobe reader x in malayalam<br />
57
- How to download adobe reader x for mac os catalina<br />
58
- How to download adobe reader x for tablet<br />
59
- How to download adobe reader x 10.1.2<br />
60
- How to download adobe reader x in kannada<br />
61
- How to download adobe reader x for mac os mojave<br />
62
- How to download adobe reader x for chrome os<br />
63
- How to download adobe reader x 10.0.3<br />
64
- How to download adobe reader x in gujarati</p>
65
- <h2>How to Download Adobe Reader X for Mac</h2>
66
- <p>If you are using a Mac computer, here are the steps to download and install Adobe Reader X:</p>
67
- <ol>
68
- <li><strong>Check your system requirements.</strong> Before you download Adobe Reader X, make sure that your computer meets the minimum system requirements. You can find them on <a href="">this page</a>. You will need a Mac OS X operating system (version 10.5.8 or later), an Intel processor, at least 512 MB of RAM, at least 415 MB of hard disk space, a screen resolution of at least 1024 x 768 pixels, and an Internet browser (Safari or Firefox).</li>
69
- <li><strong>Go to the official Adobe website.</strong> Open your Internet browser and go to <a href="">this page</a>. This is where you can download Acrobat Reader for free.</li>
70
- <li><strong>Choose your language and version.</strong> On the download page, you will see a drop-down menu where you can select your language. You can also choose whether you want to download Acrobat Reader for Windows (32-bit or 64-bit) or Mac OS. Make sure you select the correct version for your system.</li>
71
- <li><strong>Click the Download button.</strong> After choosing your language and version, click the yellow Download button. You will see a pop-up window asking you to save the file. Choose a location on your computer where you want to save the file and click Save.</li>
72
- <li><strong>Open the DMG file and drag the icon to the Applications folder.</strong> Once the download is complete, locate the file on your computer and double-click it to open it. You will see a window with an icon of Adobe Reader X and a shortcut to the Applications folder. Drag the icon of Adobe Reader X to the Applications folder and drop it there. This will copy the software to your computer.</li>
73
- </ol>
74
- <p>Congratulations, you have successfully downloaded and installed Adobe Reader X for Mac. You can now open and view any PDF file on your computer with this software.</p>
75
- <h2>How to Troubleshoot Adobe Reader X Installation Issues</h2>
76
- <p>Sometimes, you may encounter some issues when installing or using Adobe Reader X. Here are some common issues and solutions that may help you fix them:</p>
77
- <h3>Reinstall Adobe Reader X</h3>
78
- <p>If Adobe Reader X does not work properly or crashes frequently, you may need to reinstall it. To do this, follow these steps:</p>
79
- <ul>
80
- <li>Uninstall Adobe Reader X from your computer. You can do this by going to Control Panel > Programs > Programs and Features (for Windows) or by dragging the icon of Adobe Reader X from the Applications folder to the Trash (for Mac).</li>
81
- <li>Delete any leftover files or folders related to Adobe Reader X from your computer. You can use a tool like <a href="">CCleaner</a> (for Windows) or <a href="">AppCleaner</a> (for Mac) to do this easily.</li>
82
- <li>Download and install Adobe Reader X again from the official website following the steps above.</li>
83
- </ul>
84
- <p>This should fix any corrupted or missing files that may cause problems with Adobe Reader X.</p>
85
- <h3>Disable Protected Mode at Startup</h3>
86
- <p>If Adobe Reader X does not open or displays an error message when opening a PDF file, you may need to disable Protected Mode at Startup. This is a security feature that prevents malicious code from running on your computer, but it may also interfere with some PDF files or features. To disable Protected Mode at Startup, follow these steps:</p>
87
- <ul>
88
- <li>Open Adobe Reader X on your computer.</li>
89
- <li>Go to Edit > Preferences (for Windows) or Acrobat > Preferences (for Mac).</li>
90
- <li>Select General from the left panel.</li>
91
- <li>Uncheck the box that says Enable Protected Mode at Startup.</li>
92
- <li>Click OK and restart Adobe Reader X.</li>
93
- </ul>
94
- <p>This should allow you to open any PDF file without errors or issues.</p>
95
- <h3>Check for permission issues</h3>
96
- <p>If Adobe Reader X does not save or print PDF files, you may need to check for permission issues. This means that you may not have enough access rights to modify or use certain files or folders on your computer. To check for permission issues, follow these steps:</p>
97
- <ul> <li>Right-click on the PDF file or folder that you want to save or print.</li>
98
- <li>Select Properties (for Windows) or Get Info (for Mac).</li>
99
- <li>Go to the Security tab (for Windows) or the Sharing & Permissions section (for Mac).</li>
100
- <li>Make sure that you have Full Control (for Windows) or Read & Write (for Mac) permissions for the file or folder.</li>
101
- <li>If not, click the Edit button (for Windows) or the lock icon (for Mac) and change the permissions accordingly.</li>
102
- <li>Click OK and try to save or print the PDF file again.</li>
103
- </ul>
104
- <p>This should resolve any permission issues that may prevent you from saving or printing PDF files.</p>
105
- <h3>Repair Installation</h3>
106
- <p>If Adobe Reader X does not launch or shows an error message when launching, you may need to repair the installation. This will fix any damaged or missing components that may affect the performance of Adobe Reader X. To repair the installation, follow these steps:</p>
107
- <ul>
108
- <li>Go to Control Panel > Programs > Programs and Features (for Windows) or Applications > Utilities > Adobe Installers (for Mac).</li>
109
- <li>Select Adobe Reader X from the list of programs and click the Change button (for Windows) or the Uninstall button (for Mac).</li>
110
- <li>Choose the Repair option and click Next (for Windows) or Continue (for Mac).</li>
111
- <li>Follow the on-screen instructions to complete the repair process.</li>
112
- <li>Restart your computer and try to launch Adobe Reader X again.</li>
113
- </ul>
114
- <p>This should fix any errors or issues that may prevent Adobe Reader X from launching.</p>
115
- <h3>Force open the files with Adobe Reader X</h3>
116
- <p>If Adobe Reader X does not open PDF files by default, you may need to force open them with Adobe Reader X. This will make sure that Adobe Reader X is the default program for opening PDF files on your computer. To force open PDF files with Adobe Reader X, follow these steps:</p>
117
- <ul>
118
- <li>Right-click on the PDF file that you want to open.</li>
119
- <li>Select Open With > Choose Another App (for Windows) or Open With > Other... (for Mac).</li>
120
- <li>Select Adobe Reader X from the list of programs and check the box that says Always use this app to open .pdf files (for Windows) or Always Open With (for Mac).</li>
121
- <li>Click OK and open the PDF file with Adobe Reader X.</li>
122
- </ul>
123
- <p>This should make Adobe Reader X the default program for opening PDF files on your computer.</p>
124
- <h2>Conclusion</h2>
125
- <p>In this article, we have shown you how to download Adobe Reader X for Windows and Mac, as well as how to troubleshoot some common installation issues. Adobe Reader X is a free software that allows you to view, print, and comment on PDF files. It also offers some advanced features, such as converting PDFs to other file formats, password protecting PDFs, comparing PDFs, and integrating with cloud storage services. With Adobe Reader X, you can access any PDF file on your computer or mobile device with ease and convenience.</p>
126
- <p>If you want to learn more about Adobe Reader X, you can visit <a href="">this page</a> for more information and resources. You can also check out <a href="">this page</a> for some tips and tricks on how to use Adobe Reader X effectively. We hope you have enjoyed this article and found it helpful. Thank you for reading!</p>
127
- <h2>FAQs</h2>
128
- <h3>What is the difference between Acrobat Reader and Acrobat Pro?</h3>
129
- <p>Acrobat Reader is a free software that allows you to view, print, and comment on PDF files. Acrobat Pro is a paid software that allows you to create, edit, convert, sign, and share PDF files. Acrobat Pro also has more features and tools than Acrobat Reader, such as OCR, redaction, optimization, accessibility, and collaboration.</p>
130
- <h3>How can I update Adobe Reader X to the latest version?</h3>
131
- <p>You can update Adobe Reader X to the latest version by following these steps:</p>
132
- <ul>
133
- <li>Open Adobe Reader X on your computer.</li>
134
- <li>Go to Help > Check for Updates.</li>
135
- <li>If there are any updates available, click the Download button and follow the instructions.</li>
136
- <li>Restart your computer and enjoy the latest version of Adobe Reader X.</li>
137
- </ul>
138
- <p>You can also enable automatic updates by going to Edit > Preferences > Updater and selecting Automatically install updates.</p>
139
- <h3>How can I open a password-protected PDF with Adobe Reader X?</h3>
140
- <p>You can open a password-protected PDF with Adobe Reader X by following these steps:</p>
141
- <ul>
142
- <li>Double-click on the PDF file that you want to open.</li>
143
- <li>Enter the password that was set by the creator of the PDF file.</li>
144
- <li>Click OK and view the PDF file with Adobe Reader X.</li>
145
- </ul>
146
- <p>If you do not know the password, you will not be able to open the PDF file. You will need to contact the creator of the PDF file and ask for the password.</p>
147
- <h3>How can I annotate PDFs with Adobe Reader X?</h3>
148
- <p>You can annotate PDFs with Adobe Reader X by following these steps:</p>
149
- <ul>
150
- <li>Open the PDF file that you want to annotate with Adobe Reader X.</li>
151
- <li>Go to View > Tools > Comment and click the Open button.</li>
152
- <li>Select the annotation tool that you want to use from the toolbar. You can choose from different types of annotations, such as highlight, underline, strikeout, sticky note, text box, stamp, and more.</li>
153
- <li>Click on the PDF file where you want to add the annotation and adjust it as needed.</li>
154
- <li>You can also edit, delete, or reply to your annotations by right-clicking on them and choosing the appropriate option.</li>
155
- </ul>
156
- <p>Your annotations will be saved with the PDF file and can be viewed by anyone who opens it with Adobe Reader X or any other PDF viewer.</p>
157
- <h3>How can I access my PDFs from anywhere with Adobe Reader X?</h3>
158
- <p>You can access your PDFs from anywhere with Adobe Reader X by following these steps:</p>
159
- <ul>
160
- <li>Create a free account on <a href="">Adobe Document Cloud</a>, a cloud storage service that allows you to store and access your PDF files online.</li>
161
- <li>Upload your PDF files to Adobe Document Cloud by going to File > Save As > Adobe Document Cloud or by dragging and dropping them to the Adobe Document Cloud window.</li>
162
- <li>Sign in to your Adobe Document Cloud account on any device that has Adobe Reader X installed or on any web browser that supports PDF viewing.</li>
163
- <li>Open and view your PDF files from Adobe Document Cloud with Adobe Reader X or any other PDF viewer.</li>
164
- </ul>
165
- <p>You can also share your PDF files with others, edit them online, or convert them to other file formats with Adobe Document Cloud.</p> 197e85843d<br />
166
- <br />
167
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street APK for PC How to Play the Stunning Racing Game on Your Laptop or Desktop.md DELETED
@@ -1,131 +0,0 @@
- <br />
- <h1>How to Download and Play CarX Street on PC</h1>
- <p>CarX Street is a racing game developed by CarX Technologies, LLC. It is an open-world street racer that lets you explore the large city and its surroundings, from busy city streets to spiral mountain roads and mesmerizing coastal highways. You can race to collect legendary racing cars and display them in your garage, or challenge other players in real network races. You can also build the car of your dreams using part tuning that unlocks all the physics of CarX Technology car behavior.</p>
- <p>If you are a fan of racing games, you might want to play CarX Street on your PC instead of your mobile device. Playing on PC has many advantages, such as a larger screen, better graphics, smoother performance, and more comfortable controls. In this article, we will show you how to download and install CarX Street on your PC using different emulators. We will also give you some tips and tricks to help you enjoy the game more.</p>
- <h2>carx street apk for pc</h2><br /><p><b><b>Download</b> ===> <a href="https://urlin.us/2uSS96">https://urlin.us/2uSS96</a></b></p><br /><br />
- <h2>What is CarX Street?</h2>
- <p>CarX Street is a simulation racing video game that offers realistic car physics and high-speed drifting. The game also features different map types from around the world, and players can choose from several different game modes. Players can compete against other players, or participate in races and events.</p>
- <h3>Features of CarX Street</h3>
- <p>Some of the features of CarX Street are:</p>
- <ul>
- <li>Open world: You can get on your vehicle and explore the entire virtual world. You can find hidden spots, shortcuts, and secrets.</li>
- <li>Free to play: You can download and play CarX Street for free. You can also earn in-game currency by completing tasks and challenges.</li>
- <li>Buying gas: You need to fuel up your car with the right gas for the next race at city gas stations. Different types of gas have different effects on your car's performance.</li>
- <li>Houses and garages: You can buy houses for your cars and assemble collections for every race mode. You can also customize your garage with various decorations.</li>
- <li>In-game shop: You can buy over 50 official vehicles from the best automakers in the world. You can also buy parts, accessories, paints, stickers, and more.</li>
- <li>Many types of vehicles: You can choose from different types of vehicles, such as sports cars, muscle cars, supercars, hypercars, SUVs, trucks, and more.</li>
- <li>Car customization: You can customize your car with a detailed car-building system. You can swap parts and trick out your car for a specific race. You can also upgrade the engine, transmission, body, suspension, and tires.</li>
- <li>In-game free currency: You can earn free currency by watching ads, completing tasks, or participating in events. You can use the free currency to buy items or unlock features.</li>
- </ul>
- <h3>Benefits of playing CarX Street on PC</h3>
- <p>Playing CarX Street on PC has many benefits, such as:</p>
- <ul>
- <li>Larger screen: You can enjoy the stunning graphics and details of the game on a bigger screen. You can also see more of the map and the surroundings.</li>
- <li>Better graphics: You can adjust the graphics settings to suit your PC's specifications. You can also experience higher resolution, frame rate, and quality.</li>
- <li>Smoother performance: You can avoid lagging, crashing, or overheating issues that might occur on mobile devices. You can also save battery life and storage space.</li>
- <li>More comfortable controls: You can use your keyboard and mouse to control your car more easily and precisely. You can also customize your key mapping according to your preference.</li>
- </ul>
- <h2>How to Download and Install CarX Street on PC</h2>
- <p>If you want to play CarX Street on your PC, you will need to use an Android emulator. An emulator is software that mimics the Android operating system on your computer, allowing you to run Android apps and games. There are many emulators available, but we will show you how to use three of the most popular ones: BlueStacks, NoxPlayer, and LDPlayer.</p>
- <h3>Using BlueStacks emulator</h3>
- <p>BlueStacks is one of the most widely used Android emulators, with over 500 million users worldwide. It is compatible with both Windows and Mac operating systems, and it has a user-friendly interface and advanced features. Here are the steps to download and install CarX Street on PC using BlueStacks:</p>
- <ol>
- <li>Download and install BlueStacks on your PC from [1](https://www.bluestacks.com/).</li>
- <li>Complete Google sign-in to access the Play Store, or do it later.</li>
- <li>Look for CarX Street in the search bar at the top right corner.</li>
- <li>Click to install CarX Street from the search results.</li>
- <li>Complete Google sign-in (if you skipped step 2) to install CarX Street.</li>
- <li>Click the CarX Street icon on the home screen to start playing.</li>
- </ol>
- <h3>Using NoxPlayer emulator</h3>
- <p>NoxPlayer is another popular Android emulator, with over 150 million users worldwide. It is also compatible with both Windows and Mac operating systems, and it has a simple and fast interface and performance. Here are the steps to download and install CarX Street on PC using NoxPlayer:</p>
- <ol>
- <li>Download and install NoxPlayer on your PC from [5](https://www.bignox.com/).</li>
- <li>Run the installation package and complete the installation.</li>
- <li>Open NoxPlayer and search for CarX Street in the Google Play Store.</li>
- <li>Install the game and launch it to start playing.</li>
- </ol>
- <h3>Using LDPlayer emulator</h3>
- <p>LDPlayer is a newer Android emulator, but it has gained popularity among gamers for its high performance and compatibility. It is also compatible with both Windows and Mac operating systems, and it has a smooth and stable interface and features. Here are the steps to download and install CarX Street on PC using LDPlayer:</p>
- <p>carx street racing game download for pc<br />
- carx street mod apk for pc<br />
- carx street pc emulator<br />
- carx street android game on pc<br />
- carx street free download for pc<br />
- carx street pc version<br />
- carx street pc requirements<br />
- carx street pc gameplay<br />
- carx street pc online<br />
- carx street pc windows 10<br />
- carx street pc bluestacks<br />
- carx street pc noxplayer<br />
- carx street pc ldplayer<br />
- carx street pc steam<br />
- carx street pc review<br />
- carx street pc cheats<br />
- carx street pc hack<br />
- carx street pc controller support<br />
- carx street pc graphics settings<br />
- carx street pc best cars<br />
- carx street pc tips and tricks<br />
- carx street pc update<br />
- carx street pc release date<br />
- carx street pc beta test<br />
- carx street pc download size<br />
- carx street apk for windows 7<br />
- carx street apk for windows 8.1<br />
- carx street apk for macbook<br />
- carx street apk for laptop<br />
- carx street apk for desktop<br />
- carx street apk for chromebook<br />
- carx street apk for linux<br />
- carx street apk for ubuntu<br />
- carx street apk for mac os x<br />
- carx street apk for windows xp<br />
- how to install carx street apk on pc<br />
- how to play carx street apk on pc<br />
- how to run carx street apk on pc<br />
- how to download carx street apk on pc<br />
- how to update carx street apk on pc<br />
- how to uninstall carx street apk on pc<br />
- how to transfer carx street apk from android to pc<br />
- how to sync carx street apk between android and pc<br />
- how to fix carx street apk not working on pc<br />
- how to get unlimited coins in carx street apk on pc<br />
- how to customize cars in carx street apk on pc<br />
- how to change language in carx street apk on pc<br />
- how to connect facebook in carx street apk on pc<br />
- how to record gameplay of carx street apk on pc</p>
- <ol>
- <li>Download and install LDPlayer on your PC from [6](https://www.ldplayer.net/).</li>
- <li>Open LDPlayer and search for CarX Street in the LD Store or Google Play Store.</li>
- <li>Install the game and launch it to start playing.</li>
- </ol>
- <h2>Tips and tricks for CarX Street</h2>
- <p>Now that you know how to play CarX Street on your PC, you might want some tips and tricks to help you improve your skills and enjoy the game more. Here are some of them:</p>
- <h3>Follow the tutorial</h3>
- <p>The game has a tutorial that will teach you the basics of driving, racing, drifting, tuning, and more. It is highly recommended that you follow the tutorial before jumping into the action, as it will help you get familiar with the game mechanics and controls. You can also revisit the tutorial anytime from the settings menu if you need a refresher.</p>
- <h3>Roam through the city for more rewards</h3>
- <p>The game has an open world that you can explore at your own pace. You can find hidden spots, shortcuts, secrets, and rewards by roaming through the city. You can also encounter random events, challenges, and races that will give you more money, reputation, or items. Roaming through the city is also a good way to practice your driving skills and test your car's performance.</p>
- <h3>Take part in sprints and clubs</h3>
- <p>The game has two main modes: sprints and clubs. Sprints are short races that last under a minute, where you have to reach the finish line as fast as possible. Clubs are longer, story-driven competitions where you have to join a club, defeat its boss, and prove yourself as the best driver in the city. Both modes offer different rewards and challenges, so try them both out and see which one suits your style more.</p>
- <h3>Go for the best cars and customize them</h3>
- <p>The game has over 50 official vehicles from the best automakers in the world. You can buy them with in-game currency or real money, or earn them by completing tasks or events. You can also customize your car with a detailed car-building system that lets you swap parts, upgrade components, paint colors, add stickers, and more. You can also customize your garage with various decorations and display your car collection. Go for the best cars and make them your own.</p>
- <h2>Conclusion</h2>
- <p>CarX Street is a fun and realistic racing game that lets you experience the thrill of street racing. You can explore the open world, collect and customize your cars, and compete with other players. You can also play CarX Street on your PC using an Android emulator, which will give you many benefits such as a larger screen, better graphics, smoother performance, and more comfortable controls. If you are looking for a racing game that will keep you entertained and challenged, you should give CarX Street a try.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about CarX Street:</p>
- <ul>
- <li><b>Q: How do I drift in CarX Street?</b></li>
- <li>A: Drifting is an essential skill in CarX Street, as it will help you take corners faster and earn more points. To drift, you need to press the brake button while turning the steering wheel. You can also use the handbrake button to initiate a drift. You can adjust the sensitivity and angle of the steering wheel in the settings menu.</li>
- <li><b>Q: How do I get more money in CarX Street?</b></li>
- <li>A: Money is the main currency in CarX Street, which you can use to buy cars, parts, gas, and more. You can earn money by completing races, events, tasks, or challenges. You can also watch ads or use real money to get more money.</li>
- <li><b>Q: How do I join a club in CarX Street?</b></li>
- <li>A: Clubs are groups of racers that compete for territory and reputation in the city. You can join a club by completing its entry race and defeating its boss. You can also create your own club or join an existing one from the club menu.</li>
- <li><b>Q: How do I upgrade my car in CarX Street?</b></li>
- <li>A: You can upgrade your car by buying and installing new parts from the shop or the garage. You can also tune your car by adjusting the engine, transmission, body, suspension, and tires parameters. Upgrading and tuning your car will improve its performance and handling.</li>
- <li><b>Q: How do I play with friends in CarX Street?</b></li>
- <li>A: You can play with friends in CarX Street by inviting them to join your club or your race. You can also chat with them using the in-game chat feature or voice chat feature. You can also add friends from the social menu or search for them by their nickname or ID.</li>
- </ul>
spaces/1phancelerku/anime-remove-background/CapCut Edit Videos like a Pro with TikToks Official Video Editor and Video Maker - Free Download.md DELETED
@@ -1,93 +0,0 @@
-
- <h1>How to Download and Use CapCut Video Editor for TikTok</h1>
- <p>TikTok is one of the most popular social media platforms for creating and sharing short videos. Whether you want to make funny, educational, or inspirational videos, you need a good video editor to make them stand out. In this article, we will show you how to download and use CapCut, the official video editor and maker app for TikTok.</p>
- <h2>What is CapCut?</h2>
- <p>CapCut is a free video editor and maker app that is compatible with TikTok. It is developed by ByteDance, the same company that owns TikTok. CapCut allows you to edit videos on your mobile device with ease and fun. You can also use it to create videos for other social media platforms, such as YouTube, Instagram, Facebook, and WhatsApp.</p>
- <h2>download capcut video editor for tiktok</h2><br /><p><b><b>DOWNLOAD</b> &#187;&#187;&#187; <a href="https://jinyurl.com/2uNMjj">https://jinyurl.com/2uNMjj</a></b></p><br /><br />
- <h3>CapCut is a free video editor and maker app for TikTok</h3>
- <p>CapCut has everything you need to create stunning, high-quality videos. You can import your own videos and photos or record new ones in the app. You can also access a massive music library and exclusive TikTok songs. You can extract audio from videos or add your own voice-overs. You can also use AI tools to enhance your videos, such as auto captions, background removal, text-to-speech, motion tracking, and more.</p>
- <h3>CapCut offers basic and advanced editing features</h3>
- <p>CapCut has a user-friendly interface that lets you edit videos with simple gestures. You can trim, cut, merge, split, reverse, speed up, slow down, zoom in, zoom out, freeze, and animate your clips. You can also add text, stickers, filters, effects, transitions, and colors to your videos. You can use keyframe animation to customize every setting. You can also use chroma key to remove specific colors from videos. You can apply the picture-in-picture (PIP) feature to add video and photo layers above the clip. You can also use the stabilizing feature to keep video footage steady.</p>
- <h3>CapCut supports direct exports to TikTok and other social media platforms</h3>
- <p>CapCut lets you export your videos in custom resolutions and formats. You can export your videos in HD quality, with support for 4K 60fps exports and smart HDR. You can also adjust the format and share your creativity on TikTok and other social media platforms with one tap.</p>
- <h2>How to Download CapCut for Android and iOS</h2>
- <p>Downloading CapCut is easy and fast. Here are the steps to download CapCut for Android and iOS devices.</p>
- <h3>Download CapCut from Google Play Store or Apple App Store</h3>
- <p>You can download CapCut for free from Google Play Store or Apple App Store. Just search for "CapCut" in the store and tap Install or Get. The app size is about 100 MB.</p>
- <h3>Open CapCut and tap New Project to start editing</h3>
- <p>Once you have downloaded CapCut, open it on your device. You don't need a TikTok account or any other type of account to use CapCut. You can start editing right away by tapping New Project on the home screen.</p>
- <h3>Select a video or photos to edit and tap Add</h3>
- <p>You can select a video or photos from your device gallery or record a new one in the app. You can also use the search feature to find videos and photos online. You can select multiple files and tap Add to import them to your project. You can also rearrange, delete, or duplicate the clips in your timeline.</p>
- <p>How to download capcut video editor for tiktok on android<br />
- Download capcut video editor for tiktok apk free<br />
- Best capcut video editor for tiktok tutorials and tips<br />
- Download capcut video editor for tiktok for pc windows 10<br />
- Capcut video editor for tiktok review and features<br />
- Download capcut video editor for tiktok mod apk<br />
- Capcut video editor for tiktok vs inshot comparison<br />
- Download capcut video editor for tiktok pro version<br />
- Capcut video editor for tiktok online without download<br />
- Download capcut video editor for tiktok ios iphone ipad<br />
- Capcut video editor for tiktok alternatives and similar apps<br />
- Download capcut video editor for tiktok macbook laptop<br />
- Capcut video editor for tiktok filters and effects guide<br />
- Download capcut video editor for tiktok latest version update<br />
- Capcut video editor for tiktok transitions and stickers tutorial<br />
- Download capcut video editor for tiktok no watermark<br />
- Capcut video editor for tiktok music and sound effects library<br />
- Download capcut video editor for tiktok premium unlocked<br />
- Capcut video editor for tiktok speed and reverse options<br />
- Download capcut video editor for tiktok cracked full version<br />
- Capcut video editor for tiktok crop and rotate tools<br />
- Download capcut video editor for tiktok from google play store<br />
- Capcut video editor for tiktok split and merge videos function<br />
- Download capcut video editor for tiktok from official website<br />
- Capcut video editor for tiktok text and font styles customization<br />
- Download capcut video editor for tiktok with bluestacks emulator<br />
- Capcut video editor for tiktok voice changer and dubbing feature<br />
- Download capcut video editor for tiktok without ads or subscription<br />
- Capcut video editor for tiktok chroma key and green screen effect<br />
- Download capcut video editor for tiktok with qr code scanner<br />
- Capcut video editor for tiktok slideshow and collage maker mode<br />
- Download capcut video editor for tiktok old version apk file<br />
- Capcut video editor for tiktok cutout and background changer tool<br />
- Download capcut video editor for tiktok on amazon fire tablet<br />
- Capcut video editor for tiktok gif and meme generator option<br />
- Download capcut video editor for tiktok on chromebook device<br />
- Capcut video editor for tiktok face swap and beauty filter feature<br />
- Download capcut video editor for tiktok on linux operating system<br />
- Capcut video editor for tiktok animation and drawing effect mode<br />
- Download capcut video editor for tiktok on smart tv or roku device</p>
- <h2>How to Use CapCut to Edit Videos for TikTok</h2>
- <p>Editing videos with CapCut is fun and easy. Here are some tips on how to use CapCut to edit videos for TikTok.</p>
- <h3>Use the editing tools to trim, crop, reverse, speed up, and animate your clips</h3>
- <p>You can use the editing tools at the bottom of the screen to adjust your clips. You can tap Trim to cut out unwanted parts of your video. You can tap Crop to change the aspect ratio and zoom in or out of your video. You can tap Reverse to play your video backwards. You can tap Speed to change the playback speed of your video. You can tap Animate to add motion effects to your video.</p>
- <h3>Add text, stickers, filters, effects, and music to your videos</h3>
- <p>You can add text, stickers, filters, effects, and music to your videos by tapping the icons on the right side of the screen. You can tap Text to add captions, titles, or subtitles to your video. You can tap Sticker to add emojis, icons, or images to your video. You can tap Filter to apply different color presets to your video. You can tap Effect to add various visual effects to your video. You can tap Music to add songs, sound effects, or voice-overs to your video.</p>
- <h3>Use the templates and styles to enhance your videos</h3>
- <p>You can use the templates and styles to enhance your videos by tapping the icons on the left side of the screen. You can tap Template to apply pre-made themes and layouts to your video. You can tap Style to apply different artistic styles and filters to your video.</p>
- <h3>Tap Export to save and share your videos</h3>
- <p>When you are done editing your video, you can tap Export at the top right corner of the screen. You can choose the resolution, format, and quality of your video. You can also enable watermark removal if you want. Then you can tap Save or Share to save your video to your device or share it directly on TikTok or other social media platforms.</p>
- <h2>Benefits of Using CapCut for TikTok Videos</h2>
- <p>Using CapCut for TikTok videos has many benefits. Here are some of them.</p>
- <h3>CapCut is easy to use and versatile</h3>
- <p>CapCut is designed for beginners and professionals alike. It has a simple and intuitive interface that lets you edit videos with ease and fun. It also has a lot of features and options that let you customize your videos according to your preferences and needs.</p>
- <h3>CapCut has a large library of sounds and animations</h3>
- <p>CapCut has a large library of sounds and animations that you can use for free. You can access thousands of songs and sound effects that are updated regularly. You can also use exclusive TikTok songs that are popular and trending. You can also use hundreds of animations that are dynamic and creative.</p>
- <h3>CapCut can create stunning, high-quality videos</h3>
- <p>CapCut can create stunning, high-quality videos that will impress your audience. You can export your videos in HD quality, with support for 4K 60fps exports and smart HDR. You can also use AI tools that will enhance your videos automatically.</p>
- <h1>Conclusion</h1>
- <p>CapCut is a free video editor and maker app for TikTok that you can download and use on your Android or iOS device. It has everything you need to create stunning, high-quality videos with ease and fun. You can also use it to create videos for other social media platforms, such as YouTube, Instagram, Facebook, and WhatsApp. If you want to make amazing TikTok videos, download CapCut today!</p>
- <h2>Frequently Asked Questions</h2>
- <h4>Is CapCut safe?</h4>
- <p>Yes, CapCut is safe and secure. It does not contain any viruses or malware. It also does not collect any personal information from users.</p>
- <h4>Is CapCut free?</h4>
- <p>Yes, CapCut is free and does not have any hidden fees or charges. It also does not have any annoying ads or watermarks.</p>
- <h4>How do I update CapCut?</h4>
- <p>You can update CapCut by going to the Google Play Store or the Apple App Store and tapping Update. You can also enable automatic updates in your device settings.</p>
- <h4>How do I delete CapCut?</h4>
- <p>You can delete CapCut by going to your device settings and tapping Apps or Applications. Then you can find CapCut and tap Uninstall or Delete. You can also delete CapCut by long-pressing the app icon and tapping Remove or Delete.</p>
- <h4>How do I contact CapCut support?</h4>
- <p>You can contact CapCut support by going to the app settings and tapping Feedback or Help. You can also email them at [email protected] or visit their website at https://www.capcut.net/.</p>
spaces/2023Liu2023/bingo/Dockerfile DELETED
@@ -1,36 +0,0 @@
- FROM node:18
-
-
- ARG DEBIAN_FRONTEND=noninteractive
-
- ENV BING_HEADER ""
-
- # Set home to the user's home directory
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH
-
- # Set up a new user named "user" with user ID 1000
- RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
-
- # Switch to the "user" user
- USER user
-
- # Set the working directory to the user's home directory
- WORKDIR $HOME/app
-
- # Install app dependencies
- # A wildcard is used to ensure both package.json AND package-lock.json are copied
- # where available (npm@5+)
- COPY --chown=user package*.json $HOME/app/
-
- RUN npm install
-
- # Copy the current directory contents into the container at $HOME/app setting the owner to the user
- COPY --chown=user . $HOME/app/
-
- RUN npm run build
-
- ENV PORT 7860
- EXPOSE 7860
-
- CMD npm start
spaces/2023Liu2023/bingo/src/app/page.tsx DELETED
@@ -1,15 +0,0 @@
- import dynamic from 'next/dynamic'
-
- const DynamicComponentWithNoSSR = dynamic(
-   () => import('../components/chat'),
-   { ssr: false }
- )
-
- export default function IndexPage() {
-   return (
-     <>
-       <div className="loading-spinner" />
-       <DynamicComponentWithNoSSR />
-     </>
-   )
- }
spaces/2023Liu2023/bingo/src/components/chat.tsx DELETED
@@ -1,93 +0,0 @@
- 'use client'
-
- import { useCallback, useEffect, useMemo, useState } from 'react'
- import { useAtom } from 'jotai'
- import Image from 'next/image'
- import { cn } from '@/lib/utils'
- import { ChatList } from '@/components/chat-list'
- import { ChatPanel } from '@/components/chat-panel'
- import { WelcomeScreen } from '@/components/welcome-screen'
- import { ChatScrollAnchor } from '@/components/chat-scroll-anchor'
- import { ToneSelector } from './tone-selector'
- import { ChatHeader } from './chat-header'
- import { ChatSuggestions } from './chat-suggestions'
- import { bingConversationStyleAtom } from '@/state'
- import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom'
- import StopIcon from '@/assets/images/stop.svg'
- import { useBing } from '@/lib/hooks/use-bing'
- import { ChatMessageModel } from '@/lib/bots/bing/types'
- import { ChatNotification } from './chat-notification'
- import { Settings } from './settings'
- import { ChatHistory } from './chat-history'
-
- export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: ChatMessageModel[] }
-
- export default function Chat({ className }: ChatProps) {
-
-   const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom)
-   const {
-     messages,
-     sendMessage,
-     resetConversation,
-     stopGenerating,
-     setInput,
-     bot,
-     input,
-     generating,
-     isSpeaking,
-     uploadImage,
-     attachmentList,
-     setAttachmentList,
-   } = useBing()
-
-   useEffect(() => {
-     window.scrollTo({
-       top: document.body.offsetHeight,
-       behavior: 'smooth'
-     })
-   }, [])
-
-   return (
-     <div className="flex flex-1 flex-col">
-       <Settings />
-       <div className={cn('flex-1 pb-16', className)}>
-         <ChatHeader />
-         <WelcomeScreen setInput={setInput} />
-         <ToneSelector type={bingStyle} onChange={setBingStyle} />
-         {messages.length ? (
-           <>
-             <ChatList messages={messages} />
-             <ChatScrollAnchor trackVisibility={generating} />
-             <ChatNotification message={messages.at(-1)} bot={bot} />
-             <ChatSuggestions setInput={setInput} suggestions={messages.at(-1)?.suggestedResponses} />
-
-             {generating ? (
-               <div className="flex h-10 items-center justify-center my-4">
-                 <button
-                   onClick={stopGenerating}
-                   className="typing-control-item stop"
-                 >
-                   <Image alt="stop" src={StopIcon} width={24} className="mr-1" />
-                   {/* "停止响应" means "Stop responding" */}
-                   <span>停止响应</span>
-                 </button>
-               </div>
-             ) : null}
-           </>
-         ) : null}
-       </div>
-       <ChatPanel
-         className="pt-24 z-10"
-         isSpeaking={isSpeaking}
-         generating={generating}
-         sendMessage={sendMessage}
-         input={input}
-         setInput={setInput}
-         resetConversation={resetConversation}
-         uploadImage={uploadImage}
-         attachmentList={attachmentList}
-         setAttachmentList={setAttachmentList}
-       />
-       <ButtonScrollToBottom />
-     </div>
-   )
- }
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/transform.py DELETED
@@ -1,45 +0,0 @@
- from torchvision.transforms import (
-     Normalize,
-     Compose,
-     RandomResizedCrop,
-     InterpolationMode,
-     ToTensor,
-     Resize,
-     CenterCrop,
- )
-
-
- def _convert_to_rgb(image):
-     return image.convert("RGB")
-
-
- def image_transform(
-     image_size: int,
-     is_train: bool,
-     mean=(0.48145466, 0.4578275, 0.40821073),
-     std=(0.26862954, 0.26130258, 0.27577711),
- ):
-     normalize = Normalize(mean=mean, std=std)
-     if is_train:
-         return Compose(
-             [
-                 RandomResizedCrop(
-                     image_size,
-                     scale=(0.9, 1.0),
-                     interpolation=InterpolationMode.BICUBIC,
-                 ),
-                 _convert_to_rgb,
-                 ToTensor(),
-                 normalize,
-             ]
-         )
-     else:
-         return Compose(
-             [
-                 Resize(image_size, interpolation=InterpolationMode.BICUBIC),
-                 CenterCrop(image_size),
-                 _convert_to_rgb,
-                 ToTensor(),
-                 normalize,
-             ]
-         )
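
For orientation, here is a minimal usage sketch of the `image_transform` helper above. It assumes the function is importable from this module and that torchvision and Pillow are installed; the image path is a hypothetical placeholder.

    # Build train/eval pipelines; the CLIP-style mean/std defaults are baked in.
    from PIL import Image

    train_tf = image_transform(image_size=224, is_train=True)   # random resized crop + normalize
    eval_tf = image_transform(image_size=224, is_train=False)   # resize + center crop + normalize

    img = Image.open("example.jpg")   # hypothetical sample image
    x = eval_tf(img)                  # torch.FloatTensor of shape (3, 224, 224), normalized
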
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_adv.py DELETED
@@ -1,374 +0,0 @@
1
- import os
2
- import torch
3
- import torch.nn.functional as F
4
- import torch.nn as nn
5
- import numpy as np
6
-
7
- from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech
8
- from text_to_speech.modules.tts.syntaspeech.multi_window_disc import Discriminator
9
- from tasks.tts.fs import FastSpeechTask
10
- from text_to_speech.utils.audio.align import mel2token_to_dur
11
- from text_to_speech.utils.commons.hparams import hparams
12
- from text_to_speech.utils.metrics.diagonal_metrics import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate
13
- from text_to_speech.utils.nn.model_utils import num_params
14
- from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars
15
- from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0
16
- from text_to_speech.utils.audio.pitch_extractors import get_pitch
17
- from text_to_speech.utils.metrics.dtw import dtw as DTW
18
-
19
- from text_to_speech.utils.plot.plot import spec_to_figure
20
- from text_to_speech.utils.text.text_encoder import build_token_encoder
21
-
22
-
23
- class PortaSpeechAdvTask(FastSpeechTask):
24
- def __init__(self):
25
- super().__init__()
26
- data_dir = hparams['binary_data_dir']
27
- self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json')
28
- self.build_disc_model()
29
- self.mse_loss_fn = torch.nn.MSELoss()
30
-
31
- def build_tts_model(self):
32
- ph_dict_size = len(self.token_encoder)
33
- word_dict_size = len(self.word_encoder)
34
- self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams)
35
-
36
- self.gen_params = [p for p in self.model.parameters() if p.requires_grad]
37
- self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)]
38
- self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)]
39
- self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)]
40
- self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ]
41
-
42
- self.use_bert = True if len(self.bert_params) > 0 else False
43
-
44
- def build_disc_model(self):
45
- disc_win_num = hparams['disc_win_num']
46
- h = hparams['mel_disc_hidden_size']
47
- self.mel_disc = Discriminator(
48
- time_lengths=[32, 64, 128][:disc_win_num],
49
- freq_length=80, hidden_size=h, kernel=(3, 3)
50
- )
51
- self.disc_params = list(self.mel_disc.parameters())
52
-
53
- def on_train_start(self):
54
- super().on_train_start()
55
- for n, m in self.model.named_children():
56
- num_params(m, model_name=n)
57
- if hasattr(self.model, 'fvae'):
58
- for n, m in self.model.fvae.named_children():
59
- num_params(m, model_name=f'fvae.{n}')
60
-
61
- def _training_step(self, sample, batch_idx, optimizer_idx):
62
- loss_output = {}
63
- loss_weights = {}
64
- disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0
65
- if optimizer_idx == 0:
66
- #######################
67
- # Generator #
68
- #######################
69
- loss_output, model_out = self.run_model(sample, infer=False)
70
- self.model_out_gt = self.model_out = \
71
- {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)}
72
- if disc_start:
73
- mel_p = model_out['mel_out']
74
- if hasattr(self.model, 'out2mel'):
75
- mel_p = self.model.out2mel(mel_p)
76
- o_ = self.mel_disc(mel_p)
77
- p_, pc_ = o_['y'], o_['y_c']
78
- if p_ is not None:
79
- loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size()))
80
- loss_weights['a'] = hparams['lambda_mel_adv']
81
- if pc_ is not None:
82
- loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size()))
83
- loss_weights['ac'] = hparams['lambda_mel_adv']
84
- else:
85
- #######################
86
- # Discriminator #
87
- #######################
88
- if disc_start and self.global_step % hparams['disc_interval'] == 0:
89
- model_out = self.model_out_gt
90
- mel_g = sample['mels']
91
- mel_p = model_out['mel_out']
92
- o = self.mel_disc(mel_g)
93
- p, pc = o['y'], o['y_c']
94
- o_ = self.mel_disc(mel_p)
95
- p_, pc_ = o_['y'], o_['y_c']
96
- if p_ is not None:
97
- loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size()))
98
- loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size()))
99
- if pc_ is not None:
100
- loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size()))
101
- loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size()))
102
- total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad])
103
- loss_output['batch_size'] = sample['txt_tokens'].size()[0]
104
- return total_loss, loss_output
105
-
106
- def run_model(self, sample, infer=False, *args, **kwargs):
107
- txt_tokens = sample['txt_tokens']
108
- word_tokens = sample['word_tokens']
109
- spk_embed = sample.get('spk_embed')
110
- spk_id = sample.get('spk_ids')
111
- if not infer:
112
- output = self.model(txt_tokens, word_tokens,
113
- ph2word=sample['ph2word'],
114
- mel2word=sample['mel2word'],
115
- mel2ph=sample['mel2ph'],
116
- word_len=sample['word_lengths'].max(),
117
- tgt_mels=sample['mels'],
118
- pitch=sample.get('pitch'),
119
- spk_embed=spk_embed,
120
- spk_id=spk_id,
121
- infer=False,
122
- global_step=self.global_step,
123
- graph_lst=sample['graph_lst'],
124
- etypes_lst=sample['etypes_lst'],
125
- bert_feats=sample.get("bert_feats"),
126
- cl_feats=sample.get("cl_feats")
127
- )
128
- losses = {}
129
- losses['kl_v'] = output['kl'].detach()
130
- losses_kl = output['kl']
131
- losses_kl = torch.clamp(losses_kl, min=hparams['kl_min'])
132
- losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl
133
- losses_kl = losses_kl * hparams['lambda_kl']
134
- losses['kl'] = losses_kl
135
-
136
- self.add_mel_loss(output['mel_out'], sample['mels'], losses)
137
- if hparams['dur_level'] == 'word':
138
- self.add_dur_loss(
139
- output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
140
- self.get_attn_stats(output['attn'], sample, losses)
141
- else:
142
- super(PortaSpeechAdvTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
143
- return losses, output
144
- else:
145
- use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
146
- output = self.model(
147
- txt_tokens, word_tokens,
148
- ph2word=sample['ph2word'],
149
- word_len=sample['word_lengths'].max(),
150
- pitch=sample.get('pitch'),
151
- mel2ph=sample['mel2ph'] if use_gt_dur else None,
152
- mel2word=sample['mel2word'] if use_gt_dur else None,
153
- tgt_mels=sample['mels'],
154
- infer=True,
155
- spk_embed=spk_embed,
156
- spk_id=spk_id,
157
- graph_lst=sample['graph_lst'],
158
- etypes_lst=sample['etypes_lst'],
159
- bert_feats=sample.get("bert_feats"),
160
- cl_feats=sample.get("cl_feats")
161
- )
162
- return output
163
-
164
- def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None):
165
- T = word_len.max()
166
- dur_gt = mel2token_to_dur(mel2token, T).float()
167
- nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float()
168
- dur_pred = dur_pred * nonpadding
169
- dur_gt = dur_gt * nonpadding
170
- wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
171
- wdur = (wdur * nonpadding).sum() / nonpadding.sum()
172
-
173
- if hparams['lambda_word_dur'] > 0:
174
- losses['wdur'] = wdur * hparams['lambda_word_dur']
175
- if hparams['lambda_sent_dur'] > 0:
176
- sent_dur_p = dur_pred.sum(-1)
177
- sent_dur_g = dur_gt.sum(-1)
178
- sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean')
179
- losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
180
-
181
- with torch.no_grad():
182
- # calculate word-level abs_dur_error in micro-second
183
- abs_word_dur_error = F.l1_loss(dur_pred , dur_gt, reduction='none')
184
- abs_word_dur_error = (abs_word_dur_error * nonpadding).sum() / nonpadding.sum()
185
- abs_word_dur_error = abs_word_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
186
- losses['abs_word_dur_error'] = abs_word_dur_error
187
- # calculate word-level abs_dur_error in second
188
- sent_dur_p = dur_pred.sum(-1)
189
- sent_dur_g = dur_gt.sum(-1)
190
- abs_sent_dur_error = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean').mean()
191
- abs_sent_dur_error = abs_sent_dur_error * hparams['hop_size'] / hparams['audio_sample_rate']
192
- losses['abs_sent_dur_error'] = abs_sent_dur_error
193
-
194
- def validation_step(self, sample, batch_idx):
195
- outputs = {}
196
- outputs['losses'] = {}
197
- outputs['losses'], model_out = self.run_model(sample)
198
- outputs['total_loss'] = sum(outputs['losses'].values())
199
- outputs['nsamples'] = sample['nsamples']
200
- outputs = tensors_to_scalars(outputs)
201
- if self.global_step % hparams['valid_infer_interval'] == 0 \
202
- and batch_idx < hparams['num_valid_plots']:
203
- valid_results = self.save_valid_result(sample, batch_idx, model_out)
204
- wav_gt = valid_results['wav_gt']
205
- mel_gt = valid_results['mel_gt']
206
- wav_pred = valid_results['wav_pred']
207
- mel_pred = valid_results['mel_pred']
208
- f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
209
- f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
210
- manhattan_distance = lambda x, y: np.abs(x - y)
211
- dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance)
212
- outputs['losses']['f0_dtw'] = dist / len(f0_gt_)
213
- return outputs
214
-
215
- def save_valid_result(self, sample, batch_idx, model_out):
216
- sr = hparams['audio_sample_rate']
217
- f0_gt = None
218
- mel_out = model_out['mel_out']
219
- if sample.get('f0') is not None:
220
- f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
221
- self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt)
222
-
223
- # if self.global_step > 0:
224
- wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt)
225
- self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr)
226
- # with gt duration
227
- model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
228
- dur_info = self.get_plot_dur_info(sample, model_out)
229
- del dur_info['dur_pred']
230
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
231
- self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
232
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}',
233
- dur_info=dur_info, f0s=f0_gt)
234
-
235
- # with pred duration
236
- if not hparams['use_gt_dur']:
237
- model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
238
- dur_info = self.get_plot_dur_info(sample, model_out)
239
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}',
240
- dur_info=dur_info, f0s=f0_gt)
241
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
242
- self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
243
- # gt wav
244
- mel_gt = sample['mels'][0].cpu()
245
- wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
246
- if self.global_step <= hparams['valid_infer_interval']:
247
- self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)
248
-
249
- # add attn plot
250
-         if self.global_step > 0 and hparams['dur_level'] == 'word':
-             self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)
-
-         return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()}
-
-     def get_attn_stats(self, attn, sample, logging_outputs, prefix=''):
-         # diagonal_focus_rate
-         txt_lengths = sample['txt_lengths'].float()
-         mel_lengths = sample['mel_lengths'].float()
-         src_padding_mask = sample['txt_tokens'].eq(0)
-         target_padding_mask = sample['mels'].abs().sum(-1).eq(0)
-         src_seg_mask = sample['txt_tokens'].eq(self.seg_idx)
-         attn_ks = txt_lengths.float() / mel_lengths.float()
-
-         focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data
-         phone_coverage_rate = get_phone_coverage_rate(
-             attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()
-         diagonal_focus_rate, diag_mask = get_diagonal_focus_rate(
-             attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask)
-         logging_outputs[f'{prefix}fr'] = focus_rate.mean().data
-         logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data
-         logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data
-
-     def get_plot_dur_info(self, sample, model_out):
-         if hparams['dur_level'] == 'word':
-             T_txt = sample['word_lengths'].max()
-             dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
-             dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
-             txt = sample['ph_words'][0].split(" ")
-         else:
-             T_txt = sample['txt_tokens'].shape[1]
-             dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
-             dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
-             txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
-             txt = txt.split(" ")
-         return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
-
-     def build_optimizer(self, model):
-
-         optimizer_gen = torch.optim.AdamW(
-             self.gen_params,
-             lr=hparams['lr'],
-             betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
-             weight_decay=hparams['weight_decay'])
-
-         optimizer_disc = torch.optim.AdamW(
-             self.disc_params,
-             lr=hparams['disc_lr'],
-             betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
-             **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None
-
-         return [optimizer_gen, optimizer_disc]
-
-     def build_scheduler(self, optimizer):
-         return [
-             FastSpeechTask.build_scheduler(self, optimizer[0]),  # Generator Scheduler
-             torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1],  # Discriminator Scheduler
-                 **hparams["discriminator_scheduler_params"]),
-         ]
-
-     def on_before_optimization(self, opt_idx):
-         if opt_idx == 0:
-             nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm'])
-             if self.use_bert:
-                 nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm'])
-                 nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm'])
-             else:
-                 nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm'])
-         else:
-             nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"])
-
-     def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx):
-         if self.scheduler is not None:
-             self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches'])
-             self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches'])
-
-     ############
-     # infer
-     ############
-     def test_start(self):
-         super().test_start()
-         if hparams.get('save_attn', False):
-             os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
-         self.model.store_inverse_all()
-
-     def test_step(self, sample, batch_idx):
-         assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
-         outputs = self.run_model(sample, infer=True)
-         text = sample['text'][0]
-         item_name = sample['item_name'][0]
-         tokens = sample['txt_tokens'][0].cpu().numpy()
-         mel_gt = sample['mels'][0].cpu().numpy()
-         mel_pred = outputs['mel_out'][0].cpu().numpy()
-         mel2ph = sample['mel2ph'][0].cpu().numpy()
-         mel2ph_pred = None
-         str_phs = self.token_encoder.decode(tokens, strip_padding=True)
-         base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
-         if text is not None:
-             base_fn += text.replace(":", "$3A")[:80]
-         base_fn = base_fn.replace(' ', '_')
-         gen_dir = self.gen_dir
-         wav_pred = self.vocoder.spec2wav(mel_pred)
-         self.saving_result_pool.add_job(self.save_result, args=[
-             wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
-         if hparams['save_gt']:
-             wav_gt = self.vocoder.spec2wav(mel_gt)
-             self.saving_result_pool.add_job(self.save_result, args=[
-                 wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
-         if hparams.get('save_attn', False):
-             attn = outputs['attn'][0].cpu().numpy()
-             np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
-         # save f0 for pitch dtw
-         f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
-         f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
-         np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_)
-         np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_)
-
-         print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
-         return {
-             'item_name': item_name,
-             'text': text,
-             'ph_tokens': self.token_encoder.decode(tokens.tolist()),
-             'wav_fn_pred': base_fn % 'P',
-             'wav_fn_gt': base_fn % 'G',
-         }
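
The deleted task above drives a generator and a discriminator with separate AdamW optimizers and per-group gradient clipping. Below is a minimal, self-contained sketch of that alternating two-optimizer pattern; `gen`, `disc`, and the LSGAN-style losses are stand-ins for illustration, not the project's real modules.

```python
# Hypothetical sketch of the alternating generator/discriminator updates
# implied by build_optimizer() and on_before_optimization() above.
import torch
import torch.nn as nn

gen = nn.Linear(80, 80)   # stand-in generator
disc = nn.Linear(80, 1)   # stand-in discriminator
opt_gen = torch.optim.AdamW(gen.parameters(), lr=2e-4, betas=(0.9, 0.98))
opt_disc = torch.optim.AdamW(disc.parameters(), lr=2e-4, betas=(0.9, 0.98))

for mel_gt in (torch.randn(4, 80) for _ in range(10)):
    # opt_idx == 0: generator step, gradients clipped before the update
    mel_pred = gen(mel_gt)
    g_loss = (1 - disc(mel_pred)).pow(2).mean()
    opt_gen.zero_grad()
    g_loss.backward()
    nn.utils.clip_grad_norm_(gen.parameters(), 1.0)
    opt_gen.step()
    # opt_idx == 1: discriminator step on real vs. detached fake
    d_loss = (1 - disc(mel_gt)).pow(2).mean() + disc(mel_pred.detach()).pow(2).mean()
    opt_disc.zero_grad()
    d_loss.backward()
    nn.utils.clip_grad_norm_(disc.parameters(), 1.0)
    opt_disc.step()
```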
spaces/AIGText/GlyphControl/ldm/models/diffusion/__init__.py DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilevit-small_4xb32_2000e_3c_noF/mobilevit-small_4xb32_2000e_3c_noF.py DELETED
@@ -1,137 +0,0 @@
- model = dict(
-     type='ImageClassifier',
-     backbone=dict(type='MobileViT', arch='small'),
-     neck=dict(type='GlobalAveragePooling'),
-     head=dict(
-         type='LinearClsHead',
-         num_classes=7,
-         in_channels=640,
-         loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
-         topk=(
-             1,
-             3,
-         )))
- dataset_type = 'CustomDataset'
- data_preprocessor = dict(
-     num_classes=6, mean=[
-         0,
-         0,
-         0,
-     ], std=[
-         255,
-         255,
-         255,
-     ], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='RandomResizedCrop', scale=224),
-     dict(type='RandomFlip', prob=0.5, direction='horizontal'),
-     dict(type='PackInputs'),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='ResizeEdge', scale=288, edge='short'),
-     dict(type='CenterCrop', crop_size=256),
-     dict(type='PackInputs'),
- ]
- train_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='train',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='RandomResizedCrop', scale=224),
-             dict(type='RandomFlip', prob=0.5, direction='horizontal'),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=True))
- val_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='val',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='ResizeEdge', scale=288, edge='short'),
-             dict(type='CenterCrop', crop_size=256),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=False))
- val_evaluator = dict(
-     type='Accuracy', topk=(
-         1,
-         3,
-     ))
- test_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='val',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='ResizeEdge', scale=288, edge='short'),
-             dict(type='CenterCrop', crop_size=256),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=False))
- test_evaluator = dict(
-     type='Accuracy', topk=(
-         1,
-         3,
-     ))
- default_scope = 'mmpretrain'
- default_hooks = dict(
-     timer=dict(type='IterTimerHook'),
-     logger=dict(type='LoggerHook', interval=10),
-     param_scheduler=dict(type='ParamSchedulerHook'),
-     checkpoint=dict(type='CheckpointHook', save_best='auto', interval=10),
-     sampler_seed=dict(type='DistSamplerSeedHook'),
-     visualization=dict(type='VisualizationHook', enable=False))
- env_cfg = dict(
-     cudnn_benchmark=False,
-     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-     dist_cfg=dict(backend='nccl'))
- vis_backends = [
-     dict(type='LocalVisBackend'),
- ]
- visualizer = dict(
-     type='UniversalVisualizer',
-     vis_backends=[
-         dict(type='LocalVisBackend'),
-         dict(type='WandbVisBackend'),
-     ])
- log_level = 'INFO'
- load_from = None
- resume = False
- randomness = dict(seed=None, deterministic=False)
- optim_wrapper = dict(
-     optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001))
- param_scheduler = dict(type='StepLR', by_epoch=True, step_size=10, gamma=0.98)
- train_cfg = dict(by_epoch=True, max_epochs=2000, val_interval=10)
- val_cfg = dict()
- test_cfg = dict()
- auto_scale_lr = dict(base_batch_size=256)
- launcher = 'pytorch'
- work_dir = './work_dirs/mobilevit-small_4xb32_2000e_3c_noF'
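
A dumped mmpretrain config like the one above can be reloaded and patched with `mmengine` before reuse. Note that the dump declares `num_classes=7` in the head but `num_classes=6` in `data_preprocessor`; the sketch below (assuming mmengine/mmpretrain are installed and the file path matches) aligns the two:

```python
# Minimal sketch: reload the dumped work_dir config and fix the class-count
# mismatch between model.head and data_preprocessor before reusing it.
from mmengine.config import Config

cfg = Config.fromfile(
    'work_dirs/mobilevit-small_4xb32_2000e_3c_noF/mobilevit-small_4xb32_2000e_3c_noF.py')
cfg.data_preprocessor.num_classes = cfg.model.head.num_classes  # 7
print(cfg.model.backbone.type, cfg.train_cfg.max_epochs)  # MobileViT 2000
```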
spaces/AgProfile/GradioGenOpenAi/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: GradioGenOpenAi
- emoji: ⚡
- colorFrom: indigo
- colorTo: green
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateButtons.js DELETED
@@ -1,18 +0,0 @@
- import MergeStyle from './utils/MergeStyle.js';
- import Buttons from '../../buttons/Buttons.js';
- import CreateChild from './utils/CreateChild.js';
- import CreateChildren from './utils/CreateChildren.js';
-
- var CreateButtons = function (scene, data, view, styles, customBuilders) {
-     data = MergeStyle(data, styles);
-
-     // Replace data by child game object
-     CreateChild(scene, data, 'background', view, styles, customBuilders);
-     CreateChildren(scene, data, 'buttons', view, styles, customBuilders);
-
-     var gameObject = new Buttons(scene, data);
-     scene.add.existing(gameObject);
-     return gameObject;
- };
-
- export default CreateButtons;
spaces/Akmyradov/TurkmenTTSweSTT/uroman/bin/uroman-tsv.sh DELETED
@@ -1,28 +0,0 @@
- #!/usr/bin/env bash
- # Created by Thamme Gowda on June 17, 2019
-
- DIR=$(dirname "${BASH_SOURCE[0]}")  # get the directory name
- # DIR=$(realpath "${DIR}")          # resolve its full path if need be
-
- if [[ $# -lt 1 || $# -gt 2 ]]; then
-     >&2 echo "ERROR: invalid args"
-     >&2 echo "Usage: <input.tsv> [<output.tsv>]"
-     exit 2
- fi
-
- INP=$1
- OUT=$2
-
- CMD=$DIR/uroman.pl
-
- function romanize(){
-     paste <(cut -f1 $INP) <(cut -f2 $INP | $CMD)
- }
-
- if [[ -n $OUT ]]; then
-     romanize > $OUT
- else
-     romanize
- fi
-
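
For reference, the script keeps column 1 of the TSV intact and pipes column 2 through `uroman.pl` (a stdin filter, as the `cut -f2 $INP | $CMD` line shows). A rough Python equivalent, sketched under the assumptions that `uroman.pl` sits next to this file and every row has two tab-separated columns:

```python
# Hypothetical Python port of uroman-tsv.sh; romanize_tsv is an illustrative
# name, not part of the uroman distribution.
import subprocess
import sys
from pathlib import Path

def romanize_tsv(inp_path, out_path=None):
    cmd = Path(__file__).parent / "uroman.pl"  # assumed location
    rows = [line.rstrip("\n").split("\t", 1)
            for line in open(inp_path, encoding="utf-8")]
    texts = "\n".join(row[1] for row in rows)
    romanized = subprocess.run(
        [str(cmd)], input=texts, capture_output=True, text=True, check=True
    ).stdout.splitlines()
    out = open(out_path, "w", encoding="utf-8") if out_path else sys.stdout
    for (key, _), rom in zip(rows, romanized):
        out.write(f"{key}\t{rom}\n")
```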
spaces/AlexZou/Deploy_Restoration/net/utils.py DELETED
@@ -1,86 +0,0 @@
- import math
- import torch
- import torch.nn as nn
- import numpy as np
- # compare_psnr moved in scikit-image >= 0.16; the old path
- # skimage.measure.simple_metrics no longer exists.
- from skimage.metrics import peak_signal_noise_ratio as compare_psnr
- from torchvision import models
-
-
- def weights_init_kaiming(m):
-     classname = m.__class__.__name__
-     if classname.find('Conv') != -1:
-         nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
-     elif classname.find('Linear') != -1:
-         nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
-     elif classname.find('BatchNorm') != -1:
-         # nn.init.uniform(m.weight.data, 1.0, 0.02)
-         m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025, 0.025)
-         nn.init.constant_(m.bias.data, 0.0)
-
- class VGG19_PercepLoss(nn.Module):
-     """ Calculates perceptual loss in vgg19 space
-     """
-     def __init__(self, _pretrained_=True):
-         super(VGG19_PercepLoss, self).__init__()
-         self.vgg = models.vgg19(pretrained=_pretrained_).features
-         for param in self.vgg.parameters():
-             param.requires_grad_(False)
-
-     def get_features(self, image, layers=None):
-         if layers is None:
-             layers = {'30': 'conv5_2'}  # may add other layers
-         features = {}
-         x = image
-         for name, layer in self.vgg._modules.items():
-             x = layer(x)
-             if name in layers:
-                 features[layers[name]] = x
-         return features
-
-     def forward(self, pred, true, layer='conv5_2'):
-         true_f = self.get_features(true)
-         pred_f = self.get_features(pred)
-         return torch.mean((true_f[layer] - pred_f[layer]) ** 2)
-
-
- def batch_PSNR(img, imclean, data_range):
-     Img = img.data.cpu().numpy().astype(np.float32)
-     Iclean = imclean.data.cpu().numpy().astype(np.float32)
-     PSNR = 0
-     for i in range(Img.shape[0]):
-         PSNR += compare_psnr(Iclean[i, :, :, :], Img[i, :, :, :], data_range=data_range)
-     return (PSNR / Img.shape[0])
-
- def data_augmentation(image, mode):
-     # image is CHW; rotate/flip in HWC space, then convert back
-     out = np.transpose(image, (1, 2, 0))
-     if mode == 0:
-         # original
-         out = out
-     elif mode == 1:
-         # flip up and down
-         out = np.flipud(out)
-     elif mode == 2:
-         # rotate counterwise 90 degree
-         out = np.rot90(out)
-     elif mode == 3:
-         # rotate 90 degree and flip up and down
-         out = np.rot90(out)
-         out = np.flipud(out)
-     elif mode == 4:
-         # rotate 180 degree
-         out = np.rot90(out, k=2)
-     elif mode == 5:
-         # rotate 180 degree and flip
-         out = np.rot90(out, k=2)
-         out = np.flipud(out)
-     elif mode == 6:
-         # rotate 270 degree
-         out = np.rot90(out, k=3)
-     elif mode == 7:
-         # rotate 270 degree and flip
-         out = np.rot90(out, k=3)
-         out = np.flipud(out)
-     return np.transpose(out, (2, 0, 1))
-
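
The eight modes of `data_augmentation` above enumerate the dihedral group of a square image (four rotations, each with an optional up/down flip), and `batch_PSNR` averages per-image PSNR over a batch. A small usage sketch, assuming the module above is importable:

```python
# Exercise all eight augmentation modes and compute batch PSNR on toy data.
import numpy as np
import torch

img = np.random.rand(3, 64, 64).astype(np.float32)      # CHW image
augmented = [data_augmentation(img, mode) for mode in range(8)]
assert all(a.shape == img.shape for a in augmented)      # shape is preserved

clean = torch.rand(4, 3, 64, 64)
noisy = (clean + 0.05 * torch.randn_like(clean)).clamp(0, 1)
print(batch_PSNR(noisy, clean, data_range=1.0))          # mean PSNR in dB
```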
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/misc.py DELETED
@@ -1,262 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- import re
- import contextlib
- import numpy as np
- import torch
- import warnings
- import dnnlib
-
- #----------------------------------------------------------------------------
- # Cached construction of constant tensors. Avoids CPU=>GPU copy when the
- # same constant is used multiple times.
-
- _constant_cache = dict()
-
- def constant(value, shape=None, dtype=None, device=None, memory_format=None):
-     value = np.asarray(value)
-     if shape is not None:
-         shape = tuple(shape)
-     if dtype is None:
-         dtype = torch.get_default_dtype()
-     if device is None:
-         device = torch.device('cpu')
-     if memory_format is None:
-         memory_format = torch.contiguous_format
-
-     key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
-     tensor = _constant_cache.get(key, None)
-     if tensor is None:
-         tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
-         if shape is not None:
-             tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
-         tensor = tensor.contiguous(memory_format=memory_format)
-         _constant_cache[key] = tensor
-     return tensor
-
- #----------------------------------------------------------------------------
- # Replace NaN/Inf with specified numerical values.
-
- try:
-     nan_to_num = torch.nan_to_num # 1.8.0a0
- except AttributeError:
-     def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
-         assert isinstance(input, torch.Tensor)
-         if posinf is None:
-             posinf = torch.finfo(input.dtype).max
-         if neginf is None:
-             neginf = torch.finfo(input.dtype).min
-         assert nan == 0
-         return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
-
- #----------------------------------------------------------------------------
- # Symbolic assert.
-
- try:
-     symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
- except AttributeError:
-     symbolic_assert = torch.Assert # 1.7.0
-
- #----------------------------------------------------------------------------
- # Context manager to suppress known warnings in torch.jit.trace().
-
- class suppress_tracer_warnings(warnings.catch_warnings):
-     def __enter__(self):
-         super().__enter__()
-         warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
-         return self
-
- #----------------------------------------------------------------------------
- # Assert that the shape of a tensor matches the given list of integers.
- # None indicates that the size of a dimension is allowed to vary.
- # Performs symbolic assertion when used in torch.jit.trace().
-
- def assert_shape(tensor, ref_shape):
-     if tensor.ndim != len(ref_shape):
-         raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
-     for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
-         if ref_size is None:
-             pass
-         elif isinstance(ref_size, torch.Tensor):
-             with suppress_tracer_warnings(): # as_tensor results are registered as constants
-                 symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
-         elif isinstance(size, torch.Tensor):
-             with suppress_tracer_warnings(): # as_tensor results are registered as constants
-                 symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
-         elif size != ref_size:
-             raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
-
- #----------------------------------------------------------------------------
- # Function decorator that calls torch.autograd.profiler.record_function().
-
- def profiled_function(fn):
-     def decorator(*args, **kwargs):
-         with torch.autograd.profiler.record_function(fn.__name__):
-             return fn(*args, **kwargs)
-     decorator.__name__ = fn.__name__
-     return decorator
-
- #----------------------------------------------------------------------------
- # Sampler for torch.utils.data.DataLoader that loops over the dataset
- # indefinitely, shuffling items as it goes.
-
- class InfiniteSampler(torch.utils.data.Sampler):
-     def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
-         assert len(dataset) > 0
-         assert num_replicas > 0
-         assert 0 <= rank < num_replicas
-         assert 0 <= window_size <= 1
-         super().__init__(dataset)
-         self.dataset = dataset
-         self.rank = rank
-         self.num_replicas = num_replicas
-         self.shuffle = shuffle
-         self.seed = seed
-         self.window_size = window_size
-
-     def __iter__(self):
-         order = np.arange(len(self.dataset))
-         rnd = None
-         window = 0
-         if self.shuffle:
-             rnd = np.random.RandomState(self.seed)
-             rnd.shuffle(order)
-             window = int(np.rint(order.size * self.window_size))
-
-         idx = 0
-         while True:
-             i = idx % order.size
-             if idx % self.num_replicas == self.rank:
-                 yield order[i]
-                 if window >= 2:
-                     j = (i - rnd.randint(window)) % order.size
-                     order[i], order[j] = order[j], order[i]
-             idx += 1
-
- #----------------------------------------------------------------------------
- # Utilities for operating with torch.nn.Module parameters and buffers.
-
- def params_and_buffers(module):
-     assert isinstance(module, torch.nn.Module)
-     return list(module.parameters()) + list(module.buffers())
-
- def named_params_and_buffers(module):
-     assert isinstance(module, torch.nn.Module)
-     return list(module.named_parameters()) + list(module.named_buffers())
-
- def copy_params_and_buffers(src_module, dst_module, require_all=False):
-     assert isinstance(src_module, torch.nn.Module)
-     assert isinstance(dst_module, torch.nn.Module)
-     src_tensors = {name: tensor for name, tensor in named_params_and_buffers(src_module)}
-     for name, tensor in named_params_and_buffers(dst_module):
-         assert (name in src_tensors) or (not require_all)
-         if name in src_tensors:
-             tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)
-
- #----------------------------------------------------------------------------
- # Context manager for easily enabling/disabling DistributedDataParallel
- # synchronization.
-
- @contextlib.contextmanager
- def ddp_sync(module, sync):
-     assert isinstance(module, torch.nn.Module)
-     if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
-         yield
-     else:
-         with module.no_sync():
-             yield
-
- #----------------------------------------------------------------------------
- # Check DistributedDataParallel consistency across processes.
-
- def check_ddp_consistency(module, ignore_regex=None):
-     assert isinstance(module, torch.nn.Module)
-     for name, tensor in named_params_and_buffers(module):
-         fullname = type(module).__name__ + '.' + name
-         if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
-             continue
-         tensor = tensor.detach()
-         other = tensor.clone()
-         torch.distributed.broadcast(tensor=other, src=0)
-         assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
-
- #----------------------------------------------------------------------------
- # Print summary table of module hierarchy.
-
- def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
-     assert isinstance(module, torch.nn.Module)
-     assert not isinstance(module, torch.jit.ScriptModule)
-     assert isinstance(inputs, (tuple, list))
-
-     # Register hooks.
-     entries = []
-     nesting = [0]
-     def pre_hook(_mod, _inputs):
-         nesting[0] += 1
-     def post_hook(mod, _inputs, outputs):
-         nesting[0] -= 1
-         if nesting[0] <= max_nesting:
-             outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
-             outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
-             entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
-     hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
-     hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
-
-     # Run module.
-     outputs = module(*inputs)
-     for hook in hooks:
-         hook.remove()
-
-     # Identify unique outputs, parameters, and buffers.
-     tensors_seen = set()
-     for e in entries:
-         e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
-         e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
-         e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
-         tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
-
-     # Filter out redundant entries.
-     if skip_redundant:
-         entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
-
-     # Construct table.
-     rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
-     rows += [['---'] * len(rows[0])]
-     param_total = 0
-     buffer_total = 0
-     submodule_names = {mod: name for name, mod in module.named_modules()}
-     for e in entries:
-         name = '<top-level>' if e.mod is module else submodule_names[e.mod]
-         param_size = sum(t.numel() for t in e.unique_params)
-         buffer_size = sum(t.numel() for t in e.unique_buffers)
-         output_shapes = [str(list(t.shape)) for t in e.outputs]  # each output's own shape
-         output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
-         rows += [[
-             name + (':0' if len(e.outputs) >= 2 else ''),
-             str(param_size) if param_size else '-',
-             str(buffer_size) if buffer_size else '-',
-             (output_shapes + ['-'])[0],
-             (output_dtypes + ['-'])[0],
-         ]]
-         for idx in range(1, len(e.outputs)):
-             rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
-         param_total += param_size
-         buffer_total += buffer_size
-     rows += [['---'] * len(rows[0])]
-     rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
-
-     # Print table.
-     widths = [max(len(cell) for cell in column) for column in zip(*rows)]
-     print()
-     for row in rows:
-         print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
-     print()
-     return outputs
-
- #----------------------------------------------------------------------------
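
Two of the utilities above compose naturally: `InfiniteSampler` turns a finite dataset into an endless, lazily shuffled index stream, and `assert_shape` validates batch shapes (a `None` entry lets a dimension vary). A small usage sketch, assuming the module above is importable:

```python
# Draw a few batches from an infinite sampler and shape-check them.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(100, 3, 32, 32))
sampler = InfiniteSampler(dataset, rank=0, num_replicas=1, shuffle=True, seed=0)
loader = iter(DataLoader(dataset, sampler=sampler, batch_size=8))

for _ in range(3):                       # the stream never ends; take three batches
    (batch,) = next(loader)
    assert_shape(batch, [8, 3, 32, 32])  # e.g. [None, 3, 32, 32] would allow any batch size
```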
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/img2img.md DELETED
@@ -1,100 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Text-guided image-to-image generation
-
- [[Open in Colab]]
-
- The [`StableDiffusionImg2ImgPipeline`] lets you condition the generation of new images on a text prompt and a starting image.
-
- Before you begin, make sure you have all the necessary libraries installed:
-
- ```bash
- !pip install diffusers transformers ftfy accelerate
- ```
-
- Get started by creating a [`StableDiffusionImg2ImgPipeline`] with a pretrained Stable Diffusion model such as [`nitrosocke/Ghibli-Diffusion`](https://huggingface.co/nitrosocke/Ghibli-Diffusion).
-
-
- ```python
- import torch
- import requests
- from PIL import Image
- from io import BytesIO
- from diffusers import StableDiffusionImg2ImgPipeline
-
- device = "cuda"
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16).to(
-     device
- )
- ```
-
- Download and preprocess an initial image so you can pass it to the pipeline:
-
- ```python
- url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
- response = requests.get(url)
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
- init_image.thumbnail((768, 768))
- init_image
- ```
-
- <div class="flex justify-center">
-     <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg"/>
- </div>
-
- <Tip>
-
- 💡 `strength` is a value between 0.0 and 1.0 that controls the amount of noise added to the input image. Values close to 1.0 allow for lots of variation, but produce images that are less semantically consistent with the input.
-
- </Tip>
-
- Define the prompt (for this checkpoint, which is fine-tuned on Ghibli-style art, you need to prefix the prompt with the `ghibli style` token) and run the pipeline:
-
- ```python
- prompt = "ghibli style, a fantasy landscape with castles"
- generator = torch.Generator(device=device).manual_seed(1024)
- image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
- image
- ```
-
- <div class="flex justify-center">
-     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ghibli-castles.png"/>
- </div>
-
- You can also experiment with a different scheduler to see how that affects the output:
-
- ```python
- from diffusers import LMSDiscreteScheduler
-
- lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
- pipe.scheduler = lms
- generator = torch.Generator(device=device).manual_seed(1024)
- image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
- image
- ```
-
- <div class="flex justify-center">
-     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lms-ghibli.png"/>
- </div>
-
- Check out the Space below and try generating images with different `strength` values. You'll notice that lower `strength` values produce images that are more similar to the original image.
-
- Feel free to also switch the scheduler to the [`LMSDiscreteScheduler`] and see how that affects the output.
-
- <iframe
-     src="https://stevhliu-ghibli-img2img.hf.space"
-     frameborder="0"
-     width="850"
-     height="500"
- ></iframe>
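
The strength sweep the page suggests can be scripted directly. A small sketch, reusing the `pipe`, `init_image`, `prompt`, and `device` defined in the document above:

```python
# Generate the same scene at several strength values with a fixed seed.
images = {}
for strength in (0.3, 0.5, 0.75):
    generator = torch.Generator(device=device).manual_seed(1024)
    images[strength] = pipe(
        prompt=prompt, image=init_image, strength=strength,
        guidance_scale=7.5, generator=generator,
    ).images[0]
# Lower strength stays closer to the input sketch; higher strength diverges more.
```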
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion.py DELETED
@@ -1,959 +0,0 @@
- #!/usr/bin/env python
- # coding=utf-8
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import argparse
- import logging
- import math
- import os
- import random
- import shutil
- import warnings
- from pathlib import Path
-
- import numpy as np
- import PIL
- import torch
- import torch.nn.functional as F
- import torch.utils.checkpoint
- import transformers
- from accelerate import Accelerator
- from accelerate.logging import get_logger
- from accelerate.utils import ProjectConfiguration, set_seed
- from huggingface_hub import create_repo, upload_folder
-
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
- from packaging import version
- from PIL import Image
- from torch.utils.data import Dataset
- from torchvision import transforms
- from tqdm.auto import tqdm
- from transformers import CLIPTextModel, CLIPTokenizer
-
- import diffusers
- from diffusers import (
-     AutoencoderKL,
-     DDPMScheduler,
-     DiffusionPipeline,
-     DPMSolverMultistepScheduler,
-     StableDiffusionPipeline,
-     UNet2DConditionModel,
- )
- from diffusers.optimization import get_scheduler
- from diffusers.utils import check_min_version, is_wandb_available
- from diffusers.utils.import_utils import is_xformers_available
-
-
- if is_wandb_available():
-     import wandb
-
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
-     PIL_INTERPOLATION = {
-         "linear": PIL.Image.Resampling.BILINEAR,
-         "bilinear": PIL.Image.Resampling.BILINEAR,
-         "bicubic": PIL.Image.Resampling.BICUBIC,
-         "lanczos": PIL.Image.Resampling.LANCZOS,
-         "nearest": PIL.Image.Resampling.NEAREST,
-     }
- else:
-     PIL_INTERPOLATION = {
-         "linear": PIL.Image.LINEAR,
-         "bilinear": PIL.Image.BILINEAR,
-         "bicubic": PIL.Image.BICUBIC,
-         "lanczos": PIL.Image.LANCZOS,
-         "nearest": PIL.Image.NEAREST,
-     }
- # ------------------------------------------------------------------------------
-
-
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
- check_min_version("0.19.0")
-
- logger = get_logger(__name__)
-
-
- def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None):
-     img_str = ""
-     for i, image in enumerate(images):
-         image.save(os.path.join(repo_folder, f"image_{i}.png"))
-         img_str += f"![img_{i}](./image_{i}.png)\n"
-
-     yaml = f"""
- ---
- license: creativeml-openrail-m
- base_model: {base_model}
- tags:
- - stable-diffusion
- - stable-diffusion-diffusers
- - text-to-image
- - diffusers
- - textual_inversion
- inference: true
- ---
- """
-     model_card = f"""
- # Textual inversion text2image fine-tuning - {repo_id}
- These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n
- {img_str}
- """
-     with open(os.path.join(repo_folder, "README.md"), "w") as f:
-         f.write(yaml + model_card)
-
-
- def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
-     logger.info(
-         f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
-         f" {args.validation_prompt}."
-     )
-     # create pipeline (note: unet and vae are loaded again in float32)
-     pipeline = DiffusionPipeline.from_pretrained(
-         args.pretrained_model_name_or_path,
-         text_encoder=accelerator.unwrap_model(text_encoder),
-         tokenizer=tokenizer,
-         unet=unet,
-         vae=vae,
-         safety_checker=None,
-         revision=args.revision,
-         torch_dtype=weight_dtype,
-     )
-     pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-     pipeline = pipeline.to(accelerator.device)
-     pipeline.set_progress_bar_config(disable=True)
-
-     # run inference
-     generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
-     images = []
-     for _ in range(args.num_validation_images):
-         with torch.autocast("cuda"):
-             image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
-         images.append(image)
-
-     for tracker in accelerator.trackers:
-         if tracker.name == "tensorboard":
-             np_images = np.stack([np.asarray(img) for img in images])
-             tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
-         if tracker.name == "wandb":
-             tracker.log(
-                 {
-                     "validation": [
-                         wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
-                     ]
-                 }
-             )
-
-     del pipeline
-     torch.cuda.empty_cache()
-     return images
-
-
- def save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path):
-     logger.info("Saving embeddings")
-     learned_embeds = (
-         accelerator.unwrap_model(text_encoder)
-         .get_input_embeddings()
-         .weight[min(placeholder_token_ids) : max(placeholder_token_ids) + 1]
-     )
-     learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
-     torch.save(learned_embeds_dict, save_path)
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(description="Simple example of a training script.")
-     parser.add_argument(
-         "--save_steps",
-         type=int,
-         default=500,
-         help="Save learned_embeds.bin every X updates steps.",
-     )
-     parser.add_argument(
-         "--save_as_full_pipeline",
-         action="store_true",
-         help="Save the complete stable diffusion pipeline.",
-     )
-     parser.add_argument(
-         "--num_vectors",
-         type=int,
-         default=1,
-         help="How many textual inversion vectors shall be used to learn the concept.",
-     )
-     parser.add_argument(
-         "--pretrained_model_name_or_path",
-         type=str,
-         default=None,
-         required=True,
-         help="Path to pretrained model or model identifier from huggingface.co/models.",
-     )
-     parser.add_argument(
-         "--revision",
-         type=str,
-         default=None,
-         required=False,
-         help="Revision of pretrained model identifier from huggingface.co/models.",
-     )
-     parser.add_argument(
-         "--tokenizer_name",
-         type=str,
-         default=None,
-         help="Pretrained tokenizer name or path if not the same as model_name",
-     )
-     parser.add_argument(
-         "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
-     )
-     parser.add_argument(
-         "--placeholder_token",
-         type=str,
-         default=None,
-         required=True,
-         help="A token to use as a placeholder for the concept.",
-     )
-     parser.add_argument(
-         "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
-     )
-     parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
-     parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
-     parser.add_argument(
-         "--output_dir",
-         type=str,
-         default="text-inversion-model",
-         help="The output directory where the model predictions and checkpoints will be written.",
-     )
-     parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
-     parser.add_argument(
-         "--resolution",
-         type=int,
-         default=512,
-         help=(
-             "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-             " resolution"
-         ),
-     )
-     parser.add_argument(
-         "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
-     )
-     parser.add_argument(
-         "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
-     )
-     parser.add_argument("--num_train_epochs", type=int, default=100)
-     parser.add_argument(
-         "--max_train_steps",
-         type=int,
-         default=5000,
-         help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-     )
-     parser.add_argument(
-         "--gradient_accumulation_steps",
-         type=int,
-         default=1,
-         help="Number of updates steps to accumulate before performing a backward/update pass.",
-     )
-     parser.add_argument(
-         "--gradient_checkpointing",
-         action="store_true",
-         help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
-     )
-     parser.add_argument(
-         "--learning_rate",
-         type=float,
-         default=1e-4,
-         help="Initial learning rate (after the potential warmup period) to use.",
-     )
-     parser.add_argument(
-         "--scale_lr",
-         action="store_true",
-         default=False,
-         help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-     )
-     parser.add_argument(
-         "--lr_scheduler",
-         type=str,
-         default="constant",
-         help=(
-             'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-             ' "constant", "constant_with_warmup"]'
-         ),
-     )
-     parser.add_argument(
-         "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
-     )
-     parser.add_argument(
-         "--lr_num_cycles",
-         type=int,
-         default=1,
-         help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
-     )
-     parser.add_argument(
-         "--dataloader_num_workers",
-         type=int,
-         default=0,
-         help=(
-             "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
-         ),
-     )
-     parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-     parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-     parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-     parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-     parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-     parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-     parser.add_argument(
-         "--hub_model_id",
-         type=str,
-         default=None,
-         help="The name of the repository to keep in sync with the local `output_dir`.",
-     )
-     parser.add_argument(
-         "--logging_dir",
-         type=str,
-         default="logs",
-         help=(
-             "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-             " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-         ),
-     )
-     parser.add_argument(
-         "--mixed_precision",
-         type=str,
-         default="no",
-         choices=["no", "fp16", "bf16"],
-         help=(
-             "Whether to use mixed precision. Choose"
-             "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
-             "and an Nvidia Ampere GPU."
-         ),
-     )
-     parser.add_argument(
-         "--allow_tf32",
-         action="store_true",
-         help=(
-             "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
-             " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
-         ),
-     )
-     parser.add_argument(
-         "--report_to",
-         type=str,
-         default="tensorboard",
-         help=(
-             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
-             ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
-         ),
-     )
-     parser.add_argument(
-         "--validation_prompt",
-         type=str,
-         default=None,
-         help="A prompt that is used during validation to verify that the model is learning.",
-     )
-     parser.add_argument(
-         "--num_validation_images",
-         type=int,
-         default=4,
-         help="Number of images that should be generated during validation with `validation_prompt`.",
-     )
-     parser.add_argument(
-         "--validation_steps",
-         type=int,
-         default=100,
-         help=(
-             "Run validation every X steps. Validation consists of running the prompt"
-             " `args.validation_prompt` multiple times: `args.num_validation_images`"
-             " and logging the images."
-         ),
-     )
-     parser.add_argument(
-         "--validation_epochs",
-         type=int,
-         default=None,
-         help=(
-             "Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt"
-             " `args.validation_prompt` multiple times: `args.num_validation_images`"
-             " and logging the images."
-         ),
-     )
-     parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-     parser.add_argument(
-         "--checkpointing_steps",
-         type=int,
-         default=500,
-         help=(
-             "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
-             " training using `--resume_from_checkpoint`."
-         ),
-     )
-     parser.add_argument(
-         "--checkpoints_total_limit",
-         type=int,
-         default=None,
-         help=("Max number of checkpoints to store."),
-     )
-     parser.add_argument(
-         "--resume_from_checkpoint",
-         type=str,
-         default=None,
-         help=(
-             "Whether training should be resumed from a previous checkpoint. Use a path saved by"
-             ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
-         ),
-     )
-     parser.add_argument(
-         "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
-     )
-
-     args = parser.parse_args()
-     env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-     if env_local_rank != -1 and env_local_rank != args.local_rank:
-         args.local_rank = env_local_rank
-
-     if args.train_data_dir is None:
-         raise ValueError("You must specify a train data directory.")
-
-     return args
-
-
- imagenet_templates_small = [
-     "a photo of a {}",
-     "a rendering of a {}",
-     "a cropped photo of the {}",
-     "the photo of a {}",
-     "a photo of a clean {}",
-     "a photo of a dirty {}",
-     "a dark photo of the {}",
-     "a photo of my {}",
-     "a photo of the cool {}",
-     "a close-up photo of a {}",
-     "a bright photo of the {}",
-     "a cropped photo of a {}",
-     "a photo of the {}",
-     "a good photo of the {}",
-     "a photo of one {}",
-     "a close-up photo of the {}",
-     "a rendition of the {}",
-     "a photo of the clean {}",
-     "a rendition of a {}",
-     "a photo of a nice {}",
-     "a good photo of a {}",
-     "a photo of the nice {}",
-     "a photo of the small {}",
-     "a photo of the weird {}",
-     "a photo of the large {}",
-     "a photo of a cool {}",
-     "a photo of a small {}",
- ]
-
- imagenet_style_templates_small = [
-     "a painting in the style of {}",
-     "a rendering in the style of {}",
-     "a cropped painting in the style of {}",
-     "the painting in the style of {}",
-     "a clean painting in the style of {}",
-     "a dirty painting in the style of {}",
-     "a dark painting in the style of {}",
-     "a picture in the style of {}",
-     "a cool painting in the style of {}",
-     "a close-up painting in the style of {}",
-     "a bright painting in the style of {}",
-     "a cropped painting in the style of {}",
-     "a good painting in the style of {}",
-     "a close-up painting in the style of {}",
-     "a rendition in the style of {}",
-     "a nice painting in the style of {}",
-     "a small painting in the style of {}",
-     "a weird painting in the style of {}",
-     "a large painting in the style of {}",
- ]
-
-
- class TextualInversionDataset(Dataset):
-     def __init__(
-         self,
-         data_root,
-         tokenizer,
-         learnable_property="object",  # [object, style]
-         size=512,
-         repeats=100,
-         interpolation="bicubic",
-         flip_p=0.5,
-         set="train",
-         placeholder_token="*",
-         center_crop=False,
-     ):
-         self.data_root = data_root
-         self.tokenizer = tokenizer
-         self.learnable_property = learnable_property
-         self.size = size
-         self.placeholder_token = placeholder_token
-         self.center_crop = center_crop
-         self.flip_p = flip_p
-
-         self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
-
-         self.num_images = len(self.image_paths)
-         self._length = self.num_images
-
-         if set == "train":
-             self._length = self.num_images * repeats
-
-         self.interpolation = {
-             "linear": PIL_INTERPOLATION["linear"],
-             "bilinear": PIL_INTERPOLATION["bilinear"],
-             "bicubic": PIL_INTERPOLATION["bicubic"],
-             "lanczos": PIL_INTERPOLATION["lanczos"],
-         }[interpolation]
-
-         self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
-         self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
-
-     def __len__(self):
-         return self._length
-
-     def __getitem__(self, i):
-         example = {}
-         image = Image.open(self.image_paths[i % self.num_images])
-
-         if not image.mode == "RGB":
-             image = image.convert("RGB")
-
-         placeholder_string = self.placeholder_token
-         text = random.choice(self.templates).format(placeholder_string)
-
-         example["input_ids"] = self.tokenizer(
-             text,
-             padding="max_length",
-             truncation=True,
-             max_length=self.tokenizer.model_max_length,
-             return_tensors="pt",
-         ).input_ids[0]
-
-         # default to score-sde preprocessing
-         img = np.array(image).astype(np.uint8)
-
-         if self.center_crop:
-             crop = min(img.shape[0], img.shape[1])
-             (
-                 h,
-                 w,
-             ) = (
-                 img.shape[0],
-                 img.shape[1],
-             )
-             img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
-
-         image = Image.fromarray(img)
-         image = image.resize((self.size, self.size), resample=self.interpolation)
-
-         image = self.flip_transform(image)
-         image = np.array(image).astype(np.uint8)
-         image = (image / 127.5 - 1.0).astype(np.float32)
-
-         example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
-         return example
-
-
- def main():
-     args = parse_args()
-     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-     accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
-     accelerator = Accelerator(
-         gradient_accumulation_steps=args.gradient_accumulation_steps,
-         mixed_precision=args.mixed_precision,
-         log_with=args.report_to,
-         project_config=accelerator_project_config,
-     )
-
-     if args.report_to == "wandb":
-         if not is_wandb_available():
-             raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
-
-     # Make one log on every process with the configuration for debugging.
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         level=logging.INFO,
-     )
-     logger.info(accelerator.state, main_process_only=False)
-     if accelerator.is_local_main_process:
-         transformers.utils.logging.set_verbosity_warning()
-         diffusers.utils.logging.set_verbosity_info()
-     else:
-         transformers.utils.logging.set_verbosity_error()
-         diffusers.utils.logging.set_verbosity_error()
-
-     # If passed along, set the training seed now.
-     if args.seed is not None:
-         set_seed(args.seed)
-
-     # Handle the repository creation
-     if accelerator.is_main_process:
-         if args.output_dir is not None:
-             os.makedirs(args.output_dir, exist_ok=True)
-
-         if args.push_to_hub:
-             repo_id = create_repo(
-                 repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
-             ).repo_id
-
-     # Load tokenizer
-     if args.tokenizer_name:
-         tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
-     elif args.pretrained_model_name_or_path:
-         tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
-     # Load scheduler and models
-     noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
-     text_encoder = CLIPTextModel.from_pretrained(
-         args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
-     )
-     vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
-     unet = UNet2DConditionModel.from_pretrained(
-         args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
-     )
-
-     # Add the placeholder token in tokenizer
-     placeholder_tokens = [args.placeholder_token]
-
-     if args.num_vectors < 1:
-         raise ValueError(f"--num_vectors has to be larger or equal to 1, but is {args.num_vectors}")
-
-     # add dummy tokens for multi-vector
-     additional_tokens = []
-     for i in range(1, args.num_vectors):
-         additional_tokens.append(f"{args.placeholder_token}_{i}")
-     placeholder_tokens += additional_tokens
-
-     num_added_tokens = tokenizer.add_tokens(placeholder_tokens)
-     if num_added_tokens != args.num_vectors:
-         raise ValueError(
-             f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
-             " `placeholder_token` that is not already in the tokenizer."
-         )
-
-     # Convert the initializer_token, placeholder_token to ids
-     token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
-     # Check if initializer_token is a single token or a sequence of tokens
-     if len(token_ids) > 1:
-         raise ValueError("The initializer token must be a single token.")
-
-     initializer_token_id = token_ids[0]
-     placeholder_token_ids = tokenizer.convert_tokens_to_ids(placeholder_tokens)
-
-     # Resize the token embeddings as we are adding new special tokens to the tokenizer
-     text_encoder.resize_token_embeddings(len(tokenizer))
-
-     # Initialise the newly added placeholder token with the embeddings of the initializer token
-     token_embeds = text_encoder.get_input_embeddings().weight.data
-     with torch.no_grad():
-         for token_id in placeholder_token_ids:
-             token_embeds[token_id] = token_embeds[initializer_token_id].clone()
-
-     # Freeze vae and unet
-     vae.requires_grad_(False)
-     unet.requires_grad_(False)
-     # Freeze all parameters except for the token embeddings in text encoder
-     text_encoder.text_model.encoder.requires_grad_(False)
-     text_encoder.text_model.final_layer_norm.requires_grad_(False)
-     text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
-
-     if args.gradient_checkpointing:
-         # Keep unet in train mode if we are using gradient checkpointing to save memory.
-         # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode.
-         unet.train()
-         text_encoder.gradient_checkpointing_enable()
-         unet.enable_gradient_checkpointing()
-
-     if args.enable_xformers_memory_efficient_attention:
-         if is_xformers_available():
-             import xformers
-
-             xformers_version = version.parse(xformers.__version__)
-             if xformers_version == version.parse("0.0.16"):
-                 logger.warn(
-                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
-                 )
-             unet.enable_xformers_memory_efficient_attention()
-         else:
-             raise ValueError("xformers is not available. Make sure it is installed correctly")
-
-     # Enable TF32 for faster training on Ampere GPUs,
-     # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
-     if args.allow_tf32:
-         torch.backends.cuda.matmul.allow_tf32 = True
-
-     if args.scale_lr:
-         args.learning_rate = (
-             args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
-         )
-
-     # Initialize the optimizer
-     optimizer = torch.optim.AdamW(
-         text_encoder.get_input_embeddings().parameters(),  # only optimize the embeddings
-         lr=args.learning_rate,
-         betas=(args.adam_beta1, args.adam_beta2),
-         weight_decay=args.adam_weight_decay,
-         eps=args.adam_epsilon,
-     )
-
-     # Dataset and DataLoaders creation:
-     train_dataset = TextualInversionDataset(
-         data_root=args.train_data_dir,
-         tokenizer=tokenizer,
-         size=args.resolution,
-         placeholder_token=args.placeholder_token,
-         repeats=args.repeats,
-         learnable_property=args.learnable_property,
-         center_crop=args.center_crop,
-         set="train",
-     )
-     train_dataloader = torch.utils.data.DataLoader(
-         train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
-     )
-     if args.validation_epochs is not None:
-         warnings.warn(
-             f"FutureWarning: You are doing logging with validation_epochs={args.validation_epochs}."
-             " Deprecated validation_epochs in favor of `validation_steps`"
-             f"Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}",
-             FutureWarning,
-             stacklevel=2,
-         )
-         args.validation_steps = args.validation_epochs * len(train_dataset)
-
-     # Scheduler and math around the number of training steps.
-     overrode_max_train_steps = False
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if args.max_train_steps is None:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-         overrode_max_train_steps = True
-
-     lr_scheduler = get_scheduler(
-         args.lr_scheduler,
-         optimizer=optimizer,
-         num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
-         num_training_steps=args.max_train_steps * accelerator.num_processes,
-         num_cycles=args.lr_num_cycles,
-     )
-
-     # Prepare everything with our `accelerator`.
-     text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-         text_encoder, optimizer, train_dataloader, lr_scheduler
-     )
-
-     # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
-     # as these weights are only used for inference, keeping weights in full precision is not required.
-     weight_dtype = torch.float32
-     if accelerator.mixed_precision == "fp16":
-         weight_dtype = torch.float16
-     elif accelerator.mixed_precision == "bf16":
-         weight_dtype = torch.bfloat16
-
-     # Move vae and unet to device and cast to weight_dtype
-     unet.to(accelerator.device, dtype=weight_dtype)
-     vae.to(accelerator.device, dtype=weight_dtype)
-
-     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if overrode_max_train_steps:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-     # Afterwards we recalculate our number of training epochs
-     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-     # We need to initialize the trackers we use, and also store our configuration.
-     # The trackers initializes automatically on the main process.
-     if accelerator.is_main_process:
-         accelerator.init_trackers("textual_inversion", config=vars(args))
-
-     # Train!
-     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
-     logger.info("***** Running training *****")
-     logger.info(f"  Num examples = {len(train_dataset)}")
-     logger.info(f"  Num Epochs = {args.num_train_epochs}")
-     logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-     logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
-     logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
-     logger.info(f"  Total optimization steps = {args.max_train_steps}")
-     global_step = 0
-     first_epoch = 0
-     # Potentially load in the weights and states from a previous save
-     if args.resume_from_checkpoint:
-         if args.resume_from_checkpoint != "latest":
-             path = os.path.basename(args.resume_from_checkpoint)
-         else:
-             # Get the most recent checkpoint
-             dirs = os.listdir(args.output_dir)
-             dirs = [d for d in dirs if d.startswith("checkpoint")]
-             dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
-             path = dirs[-1] if len(dirs) > 0 else None
-
-         if path is None:
-             accelerator.print(
-                 f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
-             )
-             args.resume_from_checkpoint = None
-         else:
-             accelerator.print(f"Resuming from checkpoint {path}")
-             accelerator.load_state(os.path.join(args.output_dir, path))
-             global_step = int(path.split("-")[1])
-
-             resume_global_step = global_step * args.gradient_accumulation_steps
-             first_epoch = global_step // num_update_steps_per_epoch
-             resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
-     # Only show the progress bar once on each machine.
-     progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
-     progress_bar.set_description("Steps")
-
-     # keep original embeddings as reference
-     orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
-
-     for epoch in range(first_epoch, args.num_train_epochs):
-         text_encoder.train()
-         for step, batch in enumerate(train_dataloader):
-             # Skip steps until we reach the resumed step
-             if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
-                 if step % args.gradient_accumulation_steps == 0:
-                     progress_bar.update(1)
-                 continue
-
-             with accelerator.accumulate(text_encoder):
-                 # Convert images to latent space
-                 latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
-                 latents = latents * vae.config.scaling_factor
-
-                 # Sample noise that we'll add to the latents
-                 noise = torch.randn_like(latents)
834
- bsz = latents.shape[0]
835
- # Sample a random timestep for each image
836
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
837
- timesteps = timesteps.long()
838
-
839
- # Add noise to the latents according to the noise magnitude at each timestep
840
- # (this is the forward diffusion process)
841
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
842
-
843
- # Get the text embedding for conditioning
844
- encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
845
-
846
- # Predict the noise residual
847
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
848
-
849
- # Get the target for loss depending on the prediction type
850
- if noise_scheduler.config.prediction_type == "epsilon":
851
- target = noise
852
- elif noise_scheduler.config.prediction_type == "v_prediction":
853
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
854
- else:
855
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
856
-
857
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
858
-
859
- accelerator.backward(loss)
860
-
861
- optimizer.step()
862
- lr_scheduler.step()
863
- optimizer.zero_grad()
864
-
865
- # Let's make sure we don't update any embedding weights besides the newly added token
866
- index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool)
867
- index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
868
-
869
- with torch.no_grad():
870
- accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
871
- index_no_updates
872
- ] = orig_embeds_params[index_no_updates]
873
-
874
- # Checks if the accelerator has performed an optimization step behind the scenes
875
- if accelerator.sync_gradients:
876
- images = []
877
- progress_bar.update(1)
878
- global_step += 1
879
- if global_step % args.save_steps == 0:
880
- save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
881
- save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
882
-
883
- if accelerator.is_main_process:
884
- if global_step % args.checkpointing_steps == 0:
885
- # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
886
- if args.checkpoints_total_limit is not None:
887
- checkpoints = os.listdir(args.output_dir)
888
- checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
889
- checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
890
-
891
- # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
892
- if len(checkpoints) >= args.checkpoints_total_limit:
893
- num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
894
- removing_checkpoints = checkpoints[0:num_to_remove]
895
-
896
- logger.info(
897
- f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
898
- )
899
- logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
900
-
901
- for removing_checkpoint in removing_checkpoints:
902
- removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
903
- shutil.rmtree(removing_checkpoint)
904
-
905
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
906
- accelerator.save_state(save_path)
907
- logger.info(f"Saved state to {save_path}")
908
-
909
- if args.validation_prompt is not None and global_step % args.validation_steps == 0:
910
- images = log_validation(
911
- text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch
912
- )
913
-
914
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
915
- progress_bar.set_postfix(**logs)
916
- accelerator.log(logs, step=global_step)
917
-
918
- if global_step >= args.max_train_steps:
919
- break
920
- # Create the pipeline using the trained modules and save it.
921
- accelerator.wait_for_everyone()
922
- if accelerator.is_main_process:
923
- if args.push_to_hub and not args.save_as_full_pipeline:
924
- logger.warn("Enabling full model saving because --push_to_hub=True was specified.")
925
- save_full_model = True
926
- else:
927
- save_full_model = args.save_as_full_pipeline
928
- if save_full_model:
929
- pipeline = StableDiffusionPipeline.from_pretrained(
930
- args.pretrained_model_name_or_path,
931
- text_encoder=accelerator.unwrap_model(text_encoder),
932
- vae=vae,
933
- unet=unet,
934
- tokenizer=tokenizer,
935
- )
936
- pipeline.save_pretrained(args.output_dir)
937
- # Save the newly trained embeddings
938
- save_path = os.path.join(args.output_dir, "learned_embeds.bin")
939
- save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
940
-
941
- if args.push_to_hub:
942
- save_model_card(
943
- repo_id,
944
- images=images,
945
- base_model=args.pretrained_model_name_or_path,
946
- repo_folder=args.output_dir,
947
- )
948
- upload_folder(
949
- repo_id=repo_id,
950
- folder_path=args.output_dir,
951
- commit_message="End of training",
952
- ignore_patterns=["step_*", "epoch_*"],
953
- )
954
-
955
- accelerator.end_training()
956
-
957
-
958
- if __name__ == "__main__":
959
- main()
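
The embeddings this script writes to learned_embeds.bin are meant to be loaded back into a pipeline at inference time. A minimal sketch of that step, assuming a diffusers release that ships StableDiffusionPipeline.load_textual_inversion; the base model id, output path, and the "<my-token>" placeholder string are illustrative, not values produced by this run:

# Sketch: reuse a trained textual-inversion embedding for inference.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# "output_dir" and "<my-token>" are placeholders for this sketch.
pipe.load_textual_inversion("output_dir/learned_embeds.bin", token="<my-token>")
image = pipe("a photo of <my-token> on a beach").images[0]
image.save("sample.png")
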
spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py DELETED
@@ -1,10 +0,0 @@
- _base_ = './htc_hrnetv2p_w32_20e_coco.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w40',
-     backbone=dict(
-         type='HRNet',
-         extra=dict(
-             stage2=dict(num_channels=(40, 80)),
-             stage3=dict(num_channels=(40, 80, 160)),
-             stage4=dict(num_channels=(40, 80, 160, 320)))),
-     neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
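
A config like this is consumed through MMDetection's high-level API. A rough sketch, assuming an MMDetection 2.x install; the checkpoint path and demo image are placeholders:

# Sketch: build and run the HTC-HRNetV2p-W40 config with MMDetection 2.x.
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/hrnet/htc_hrnetv2p_w40_20e_coco.py'
checkpoint_file = 'checkpoints/htc_hrnetv2p_w40_20e_coco.pth'  # hypothetical path
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')
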
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py DELETED
@@ -1,27 +0,0 @@
- from mmdet.models.builder import HEADS
- from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
- from .fused_semantic_head import FusedSemanticHead
-
-
- @HEADS.register_module()
- class SCNetSemanticHead(FusedSemanticHead):
-     """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
-
-     Args:
-         conv_to_res (bool, optional): if True, change the conv layers to
-             ``SimplifiedBasicBlock``.
-     """
-
-     def __init__(self, conv_to_res=True, **kwargs):
-         super(SCNetSemanticHead, self).__init__(**kwargs)
-         self.conv_to_res = conv_to_res
-         if self.conv_to_res:
-             num_res_blocks = self.num_convs // 2
-             self.convs = ResLayer(
-                 SimplifiedBasicBlock,
-                 self.in_channels,
-                 self.conv_out_channels,
-                 num_res_blocks,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-             self.num_convs = num_res_blocks
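
Because the head is registered via @HEADS.register_module(), configs refer to it by name and the registry builds it. A hedged sketch of that path; every numeric argument below is an illustrative placeholder, not a value taken from any shipped SCNet config:

# Sketch: build the registered head from a config dict via the HEADS registry.
from mmdet.models.builder import HEADS

head_cfg = dict(
    type='SCNetSemanticHead',
    num_ins=5,            # number of input feature levels (assumed)
    fusion_level=1,       # level to fuse on (assumed)
    num_convs=8,          # halved into residual blocks when conv_to_res=True
    in_channels=256,
    conv_out_channels=256,
    num_classes=183,
    conv_to_res=True)
semantic_head = HEADS.build(head_cfg)
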
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py DELETED
@@ -1,10 +0,0 @@
- _base_ = './fcn_hr18_480x480_80k_pascal_context_59.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w48',
-     backbone=dict(
-         extra=dict(
-             stage2=dict(num_channels=(48, 96)),
-             stage3=dict(num_channels=(48, 96, 192)),
-             stage4=dict(num_channels=(48, 96, 192, 384)))),
-     decode_head=dict(
-         in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
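
The segmentation configs follow the same pattern. A rough sketch with MMSegmentation's 0.x API; the checkpoint path and input image are placeholders:

# Sketch: run the FCN-HRNetV2-W48 config with MMSegmentation 0.x.
from mmseg.apis import inference_segmentor, init_segmentor

config_file = 'configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py'
checkpoint_file = 'checkpoints/fcn_hr48_pascal_context_59.pth'  # hypothetical path
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo.png')
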
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv_custom/checkpoint.py DELETED
@@ -1,500 +0,0 @@
- # Copyright (c) Open-MMLab. All rights reserved.
- import io
- import os
- import os.path as osp
- import pkgutil
- import time
- import warnings
- from collections import OrderedDict
- from importlib import import_module
- from tempfile import TemporaryDirectory
-
- import torch
- import torchvision
- from torch.optim import Optimizer
- from torch.utils import model_zoo
- from torch.nn import functional as F
-
- import annotator.uniformer.mmcv as mmcv
- from annotator.uniformer.mmcv.fileio import FileClient
- from annotator.uniformer.mmcv.fileio import load as load_file
- from annotator.uniformer.mmcv.parallel import is_module_wrapper
- from annotator.uniformer.mmcv.utils import mkdir_or_exist
- from annotator.uniformer.mmcv.runner import get_dist_info
-
- ENV_MMCV_HOME = 'MMCV_HOME'
- ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
- DEFAULT_CACHE_DIR = '~/.cache'
-
-
- def _get_mmcv_home():
-     mmcv_home = os.path.expanduser(
-         os.getenv(
-             ENV_MMCV_HOME,
-             os.path.join(
-                 os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
-
-     mkdir_or_exist(mmcv_home)
-     return mmcv_home
-
-
- def load_state_dict(module, state_dict, strict=False, logger=None):
-     """Load state_dict to a module.
-
-     This method is modified from :meth:`torch.nn.Module.load_state_dict`.
-     Default value for ``strict`` is set to ``False`` and the message for
-     param mismatch will be shown even if strict is False.
-
-     Args:
-         module (Module): Module that receives the state_dict.
-         state_dict (OrderedDict): Weights.
-         strict (bool): whether to strictly enforce that the keys
-             in :attr:`state_dict` match the keys returned by this module's
-             :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
-         logger (:obj:`logging.Logger`, optional): Logger to log the error
-             message. If not specified, print function will be used.
-     """
-     unexpected_keys = []
-     all_missing_keys = []
-     err_msg = []
-
-     metadata = getattr(state_dict, '_metadata', None)
-     state_dict = state_dict.copy()
-     if metadata is not None:
-         state_dict._metadata = metadata
-
-     # use _load_from_state_dict to enable checkpoint version control
-     def load(module, prefix=''):
-         # recursively check parallel module in case that the model has a
-         # complicated structure, e.g., nn.Module(nn.Module(DDP))
-         if is_module_wrapper(module):
-             module = module.module
-         local_metadata = {} if metadata is None else metadata.get(
-             prefix[:-1], {})
-         module._load_from_state_dict(state_dict, prefix, local_metadata, True,
-                                      all_missing_keys, unexpected_keys,
-                                      err_msg)
-         for name, child in module._modules.items():
-             if child is not None:
-                 load(child, prefix + name + '.')
-
-     load(module)
-     load = None  # break load->load reference cycle
-
-     # ignore "num_batches_tracked" of BN layers
-     missing_keys = [
-         key for key in all_missing_keys if 'num_batches_tracked' not in key
-     ]
-
-     if unexpected_keys:
-         err_msg.append('unexpected key in source '
-                        f'state_dict: {", ".join(unexpected_keys)}\n')
-     if missing_keys:
-         err_msg.append(
-             f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
-
-     rank, _ = get_dist_info()
-     if len(err_msg) > 0 and rank == 0:
-         err_msg.insert(
-             0, 'The model and loaded state dict do not match exactly\n')
-         err_msg = '\n'.join(err_msg)
-         if strict:
-             raise RuntimeError(err_msg)
-         elif logger is not None:
-             logger.warning(err_msg)
-         else:
-             print(err_msg)
-
-
- def load_url_dist(url, model_dir=None):
-     """In a distributed setting, this function downloads the checkpoint only
-     at local rank 0."""
-     rank, world_size = get_dist_info()
-     rank = int(os.environ.get('LOCAL_RANK', rank))
-     if rank == 0:
-         checkpoint = model_zoo.load_url(url, model_dir=model_dir)
-     if world_size > 1:
-         torch.distributed.barrier()
-         if rank > 0:
-             checkpoint = model_zoo.load_url(url, model_dir=model_dir)
-     return checkpoint
-
-
- def load_pavimodel_dist(model_path, map_location=None):
-     """In a distributed setting, this function downloads the checkpoint only
-     at local rank 0."""
-     try:
-         from pavi import modelcloud
-     except ImportError:
-         raise ImportError(
-             'Please install pavi to load checkpoint from modelcloud.')
-     rank, world_size = get_dist_info()
-     rank = int(os.environ.get('LOCAL_RANK', rank))
-     if rank == 0:
-         model = modelcloud.get(model_path)
-         with TemporaryDirectory() as tmp_dir:
-             downloaded_file = osp.join(tmp_dir, model.name)
-             model.download(downloaded_file)
-             checkpoint = torch.load(downloaded_file, map_location=map_location)
-     if world_size > 1:
-         torch.distributed.barrier()
-         if rank > 0:
-             model = modelcloud.get(model_path)
-             with TemporaryDirectory() as tmp_dir:
-                 downloaded_file = osp.join(tmp_dir, model.name)
-                 model.download(downloaded_file)
-                 checkpoint = torch.load(
-                     downloaded_file, map_location=map_location)
-     return checkpoint
-
-
- def load_fileclient_dist(filename, backend, map_location):
-     """In a distributed setting, this function downloads the checkpoint only
-     at local rank 0."""
-     rank, world_size = get_dist_info()
-     rank = int(os.environ.get('LOCAL_RANK', rank))
-     allowed_backends = ['ceph']
-     if backend not in allowed_backends:
-         raise ValueError(f'Load from Backend {backend} is not supported.')
-     if rank == 0:
-         fileclient = FileClient(backend=backend)
-         buffer = io.BytesIO(fileclient.get(filename))
-         checkpoint = torch.load(buffer, map_location=map_location)
-     if world_size > 1:
-         torch.distributed.barrier()
-         if rank > 0:
-             fileclient = FileClient(backend=backend)
-             buffer = io.BytesIO(fileclient.get(filename))
-             checkpoint = torch.load(buffer, map_location=map_location)
-     return checkpoint
-
-
- def get_torchvision_models():
-     model_urls = dict()
-     for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
-         if ispkg:
-             continue
-         _zoo = import_module(f'torchvision.models.{name}')
-         if hasattr(_zoo, 'model_urls'):
-             _urls = getattr(_zoo, 'model_urls')
-             model_urls.update(_urls)
-     return model_urls
-
-
- def get_external_models():
-     mmcv_home = _get_mmcv_home()
-     default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
-     default_urls = load_file(default_json_path)
-     assert isinstance(default_urls, dict)
-     external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
-     if osp.exists(external_json_path):
-         external_urls = load_file(external_json_path)
-         assert isinstance(external_urls, dict)
-         default_urls.update(external_urls)
-
-     return default_urls
-
-
- def get_mmcls_models():
-     mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
-     mmcls_urls = load_file(mmcls_json_path)
-
-     return mmcls_urls
-
-
- def get_deprecated_model_names():
-     deprecate_json_path = osp.join(mmcv.__path__[0],
-                                    'model_zoo/deprecated.json')
-     deprecate_urls = load_file(deprecate_json_path)
-     assert isinstance(deprecate_urls, dict)
-
-     return deprecate_urls
-
-
- def _process_mmcls_checkpoint(checkpoint):
-     state_dict = checkpoint['state_dict']
-     new_state_dict = OrderedDict()
-     for k, v in state_dict.items():
-         if k.startswith('backbone.'):
-             new_state_dict[k[9:]] = v
-     new_checkpoint = dict(state_dict=new_state_dict)
-
-     return new_checkpoint
-
-
- def _load_checkpoint(filename, map_location=None):
-     """Load checkpoint from somewhere (modelzoo, file, url).
-
-     Args:
-         filename (str): Accept local filepath, URL, ``torchvision://xxx``,
-             ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
-             details.
-         map_location (str | None): Same as :func:`torch.load`. Default: None.
-
-     Returns:
-         dict | OrderedDict: The loaded checkpoint. It can be either an
-             OrderedDict storing model weights or a dict containing other
-             information, which depends on the checkpoint.
-     """
-     if filename.startswith('modelzoo://'):
-         warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
-                       'use "torchvision://" instead')
-         model_urls = get_torchvision_models()
-         model_name = filename[11:]
-         checkpoint = load_url_dist(model_urls[model_name])
-     elif filename.startswith('torchvision://'):
-         model_urls = get_torchvision_models()
-         model_name = filename[14:]
-         checkpoint = load_url_dist(model_urls[model_name])
-     elif filename.startswith('open-mmlab://'):
-         model_urls = get_external_models()
-         model_name = filename[13:]
-         deprecated_urls = get_deprecated_model_names()
-         if model_name in deprecated_urls:
-             warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
-                           f'of open-mmlab://{deprecated_urls[model_name]}')
-             model_name = deprecated_urls[model_name]
-         model_url = model_urls[model_name]
-         # check if is url
-         if model_url.startswith(('http://', 'https://')):
-             checkpoint = load_url_dist(model_url)
-         else:
-             filename = osp.join(_get_mmcv_home(), model_url)
-             if not osp.isfile(filename):
-                 raise IOError(f'{filename} is not a checkpoint file')
-             checkpoint = torch.load(filename, map_location=map_location)
-     elif filename.startswith('mmcls://'):
-         model_urls = get_mmcls_models()
-         model_name = filename[8:]
-         checkpoint = load_url_dist(model_urls[model_name])
-         checkpoint = _process_mmcls_checkpoint(checkpoint)
-     elif filename.startswith(('http://', 'https://')):
-         checkpoint = load_url_dist(filename)
-     elif filename.startswith('pavi://'):
-         model_path = filename[7:]
-         checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
-     elif filename.startswith('s3://'):
-         checkpoint = load_fileclient_dist(
-             filename, backend='ceph', map_location=map_location)
-     else:
-         if not osp.isfile(filename):
-             raise IOError(f'{filename} is not a checkpoint file')
-         checkpoint = torch.load(filename, map_location=map_location)
-     return checkpoint
-
-
- def load_checkpoint(model,
-                     filename,
-                     map_location='cpu',
-                     strict=False,
-                     logger=None):
-     """Load checkpoint from a file or URI.
-
-     Args:
-         model (Module): Module to load checkpoint.
-         filename (str): Accept local filepath, URL, ``torchvision://xxx``,
-             ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
-             details.
-         map_location (str): Same as :func:`torch.load`.
-         strict (bool): Whether to allow different params for the model and
-             checkpoint.
-         logger (:mod:`logging.Logger` or None): The logger for error message.
-
-     Returns:
-         dict or OrderedDict: The loaded checkpoint.
-     """
-     checkpoint = _load_checkpoint(filename, map_location)
-     # OrderedDict is a subclass of dict
-     if not isinstance(checkpoint, dict):
-         raise RuntimeError(
-             f'No state_dict found in checkpoint file {filename}')
-     # get state_dict from checkpoint
-     if 'state_dict' in checkpoint:
-         state_dict = checkpoint['state_dict']
-     elif 'model' in checkpoint:
-         state_dict = checkpoint['model']
-     else:
-         state_dict = checkpoint
-     # strip prefix of state_dict
-     if list(state_dict.keys())[0].startswith('module.'):
-         state_dict = {k[7:]: v for k, v in state_dict.items()}
-
-     # for MoBY, load model of online branch
-     if sorted(list(state_dict.keys()))[0].startswith('encoder'):
-         state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
-
-     # reshape absolute position embedding
-     if state_dict.get('absolute_pos_embed') is not None:
-         absolute_pos_embed = state_dict['absolute_pos_embed']
-         N1, L, C1 = absolute_pos_embed.size()
-         N2, C2, H, W = model.absolute_pos_embed.size()
-         if N1 != N2 or C1 != C2 or L != H * W:
-             logger.warning("Error in loading absolute_pos_embed, pass")
-         else:
-             state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
-
-     # interpolate position bias table if needed
-     relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
-     for table_key in relative_position_bias_table_keys:
-         table_pretrained = state_dict[table_key]
-         table_current = model.state_dict()[table_key]
-         L1, nH1 = table_pretrained.size()
-         L2, nH2 = table_current.size()
-         if nH1 != nH2:
-             logger.warning(f"Error in loading {table_key}, pass")
-         else:
-             if L1 != L2:
-                 S1 = int(L1 ** 0.5)
-                 S2 = int(L2 ** 0.5)
-                 table_pretrained_resized = F.interpolate(
-                     table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
-                     size=(S2, S2), mode='bicubic')
-                 state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
-
-     # load state_dict
-     load_state_dict(model, state_dict, strict, logger)
-     return checkpoint
-
-
- def weights_to_cpu(state_dict):
-     """Copy a model state_dict to cpu.
-
-     Args:
-         state_dict (OrderedDict): Model weights on GPU.
-
-     Returns:
-         OrderedDict: Model weights on CPU.
-     """
-     state_dict_cpu = OrderedDict()
-     for key, val in state_dict.items():
-         state_dict_cpu[key] = val.cpu()
-     return state_dict_cpu
-
-
- def _save_to_state_dict(module, destination, prefix, keep_vars):
-     """Saves module state to `destination` dictionary.
-
-     This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
-
-     Args:
-         module (nn.Module): The module to generate state_dict.
-         destination (dict): A dict where state will be stored.
-         prefix (str): The prefix for parameters and buffers used in this
-             module.
-     """
-     for name, param in module._parameters.items():
-         if param is not None:
-             destination[prefix + name] = param if keep_vars else param.detach()
-     for name, buf in module._buffers.items():
-         # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
-         if buf is not None:
-             destination[prefix + name] = buf if keep_vars else buf.detach()
-
-
- def get_state_dict(module, destination=None, prefix='', keep_vars=False):
-     """Returns a dictionary containing a whole state of the module.
-
-     Both parameters and persistent buffers (e.g. running averages) are
-     included. Keys are corresponding parameter and buffer names.
-
-     This method is modified from :meth:`torch.nn.Module.state_dict` to
-     recursively check parallel module in case that the model has a complicated
-     structure, e.g., nn.Module(nn.Module(DDP)).
-
-     Args:
-         module (nn.Module): The module to generate state_dict.
-         destination (OrderedDict): Returned dict for the state of the
-             module.
-         prefix (str): Prefix of the key.
-         keep_vars (bool): Whether to keep the variable property of the
-             parameters. Default: False.
-
-     Returns:
-         dict: A dictionary containing a whole state of the module.
-     """
-     # recursively check parallel module in case that the model has a
-     # complicated structure, e.g., nn.Module(nn.Module(DDP))
-     if is_module_wrapper(module):
-         module = module.module
-
-     # below is the same as torch.nn.Module.state_dict()
-     if destination is None:
-         destination = OrderedDict()
-         destination._metadata = OrderedDict()
-     destination._metadata[prefix[:-1]] = local_metadata = dict(
-         version=module._version)
-     _save_to_state_dict(module, destination, prefix, keep_vars)
-     for name, child in module._modules.items():
-         if child is not None:
-             get_state_dict(
-                 child, destination, prefix + name + '.', keep_vars=keep_vars)
-     for hook in module._state_dict_hooks.values():
-         hook_result = hook(module, destination, prefix, local_metadata)
-         if hook_result is not None:
-             destination = hook_result
-     return destination
-
-
- def save_checkpoint(model, filename, optimizer=None, meta=None):
-     """Save checkpoint to file.
-
-     The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
-     ``optimizer``. By default ``meta`` will contain version and time info.
-
-     Args:
-         model (Module): Module whose params are to be saved.
-         filename (str): Checkpoint filename.
-         optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
-         meta (dict, optional): Metadata to be saved in checkpoint.
-     """
-     if meta is None:
-         meta = {}
-     elif not isinstance(meta, dict):
-         raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
-     meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
-
-     if is_module_wrapper(model):
-         model = model.module
-
-     if hasattr(model, 'CLASSES') and model.CLASSES is not None:
-         # save class name to the meta
-         meta.update(CLASSES=model.CLASSES)
-
-     checkpoint = {
-         'meta': meta,
-         'state_dict': weights_to_cpu(get_state_dict(model))
-     }
-     # save optimizer state dict in the checkpoint
-     if isinstance(optimizer, Optimizer):
-         checkpoint['optimizer'] = optimizer.state_dict()
-     elif isinstance(optimizer, dict):
-         checkpoint['optimizer'] = {}
-         for name, optim in optimizer.items():
-             checkpoint['optimizer'][name] = optim.state_dict()
-
-     if filename.startswith('pavi://'):
-         try:
-             from pavi import modelcloud
-             from pavi.exception import NodeNotFoundError
-         except ImportError:
-             raise ImportError(
-                 'Please install pavi to save checkpoint to modelcloud.')
-         model_path = filename[7:]
-         root = modelcloud.Folder()
-         model_dir, model_name = osp.split(model_path)
-         try:
-             model = modelcloud.get(model_dir)
-         except NodeNotFoundError:
-             model = root.create_training_model(model_dir)
-         with TemporaryDirectory() as tmp_dir:
-             checkpoint_file = osp.join(tmp_dir, model_name)
-             with open(checkpoint_file, 'wb') as f:
-                 torch.save(checkpoint, f)
-                 f.flush()
-             model.create_file(checkpoint_file, name=model_name)
-     else:
-         mmcv.mkdir_or_exist(osp.dirname(filename))
-         # immediately flush buffer
-         with open(filename, 'wb') as f:
-             torch.save(checkpoint, f)
-             f.flush()
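
load_checkpoint resolves several URI schemes (torchvision://, open-mmlab://, http(s)://, plain paths) before delegating to torch.load. A smoke-test sketch, assuming this ControlNet repo root is importable and an older torchvision that still exposes model_urls:

# Sketch: load torchvision weights through the vendored helper.
import torchvision
from annotator.uniformer.mmcv_custom.checkpoint import load_checkpoint

model = torchvision.models.resnet50()
# 'torchvision://resnet50' is resolved through get_torchvision_models()
checkpoint = load_checkpoint(model, 'torchvision://resnet50', map_location='cpu')
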
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/__init__.py DELETED
@@ -1,4 +0,0 @@
- from .builder import build_pixel_sampler
- from .sampler import BasePixelSampler, OHEMPixelSampler
-
- __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
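
In mmseg, the exported sampler is normally enabled from a decode-head config, with the head passing itself as the sampler's context. A hedged sketch of the builder call; thresh and min_kept are just common example values, and context=None stands in for the owning head:

# Sketch: build an OHEM pixel sampler from a config dict.
from annotator.uniformer.mmseg.core.seg import build_pixel_sampler

sampler_cfg = dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)
sampler = build_pixel_sampler(sampler_cfg, context=None)  # context: owning decode head
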
spaces/Anonymous-sub/Rerender/gmflow_module/utils/utils.py DELETED
@@ -1,58 +0,0 @@
- import torch
- import torch.nn.functional as F
-
-
- class InputPadder:
-     """ Pads images such that dimensions are divisible by 8 """
-
-     def __init__(self, dims, mode='sintel', padding_factor=8):
-         self.ht, self.wd = dims[-2:]
-         pad_ht = (((self.ht // padding_factor) + 1) * padding_factor - self.ht) % padding_factor
-         pad_wd = (((self.wd // padding_factor) + 1) * padding_factor - self.wd) % padding_factor
-         if mode == 'sintel':
-             self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2]
-         else:
-             self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
-
-     def pad(self, *inputs):
-         return [F.pad(x, self._pad, mode='replicate') for x in inputs]
-
-     def unpad(self, x):
-         ht, wd = x.shape[-2:]
-         c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
-         return x[..., c[0]:c[1], c[2]:c[3]]
-
-
- def coords_grid(batch, ht, wd, normalize=False):
-     if normalize:  # [-1, 1]
-         coords = torch.meshgrid(2 * torch.arange(ht) / (ht - 1) - 1,
-                                 2 * torch.arange(wd) / (wd - 1) - 1)
-     else:
-         coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
-     coords = torch.stack(coords[::-1], dim=0).float()
-     return coords[None].repeat(batch, 1, 1, 1)  # [B, 2, H, W]
-
-
- def compute_out_of_boundary_mask(flow):
-     # flow: [B, 2, H, W]
-     assert flow.dim() == 4 and flow.size(1) == 2
-     b, _, h, w = flow.shape
-     init_coords = coords_grid(b, h, w).to(flow.device)
-     corres = init_coords + flow  # [B, 2, H, W]
-
-     max_w = w - 1
-     max_h = h - 1
-
-     valid_mask = (corres[:, 0] >= 0) & (corres[:, 0] <= max_w) & (corres[:, 1] >= 0) & (corres[:, 1] <= max_h)
-
-     # in case of very large flow
-     flow_mask = (flow[:, 0].abs() <= max_w) & (flow[:, 1].abs() <= max_h)
-
-     valid_mask = valid_mask & flow_mask
-
-     return valid_mask  # [B, H, W]
-
-
- def count_parameters(model):
-     num = sum(p.numel() for p in model.parameters() if p.requires_grad)
-     return num
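
InputPadder's pad/unpad pair is the usual wrapper around a flow model: pad both frames up to a multiple of the padding factor, run the network, then crop the output back. A self-contained sketch with random tensors standing in for real frames; the import path assumes the repo root is on sys.path:

# Sketch: round-trip a Sintel-sized pair through InputPadder.
import torch
from gmflow_module.utils.utils import InputPadder  # path as laid out in this repo

img1 = torch.randn(1, 3, 436, 1024)
img2 = torch.randn(1, 3, 436, 1024)
padder = InputPadder(img1.shape)                 # 436 is padded up to 440
img1_p, img2_p = padder.pad(img1, img2)
flow_p = torch.zeros(1, 2, *img1_p.shape[-2:])   # stand-in for the model's output
flow = padder.unpad(flow_p)                      # back to 1 x 2 x 436 x 1024
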
spaces/Anuj-Panthri/imdb_review_sentiment/app.py DELETED
@@ -1,20 +0,0 @@
- import gradio as gr
- from fastai.text.all import *
-
- # to fix: NotImplementedError: cannot instantiate 'PosixPath' on your system
- # import pathlib
- # temp = pathlib.PosixPath
- # pathlib.PosixPath = pathlib.WindowsPath
-
- examples = ['This was a fantastic end to the trilogy.',
-             'I\'ve never seen a bigger waste of my time.',
-             'Just when we thought they couldn\'t possibly make a worse TV movie than Sharknado? Syfy says, "Hold my beer!"']
-
- learn = load_learner('imdb_review_sentiment_model.pkl')
-
- class_names = ['neg', 'pos']
-
- def classify(review):
-     _, _, prob = learn.predict(review)
-     return dict(zip(class_names, map(float, prob)))
-
- iface = gr.Interface(fn=classify, inputs=gr.inputs.Textbox(), outputs=gr.outputs.Label(), examples=examples)
- iface.launch()
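
The classify function leans on fastai's Learner.predict, which returns a (decoded label, label index, probabilities) triple; the dict comprehension just pairs class names with those probabilities. A quick interactive sketch, reusing the learn object loaded above (printed values are illustrative):

# Sketch: what learn.predict returns for a single review.
label, idx, probs = learn.predict("This was a fantastic end to the trilogy.")
print(label)           # e.g. 'pos'
print(probs.tolist())  # e.g. [0.02, 0.98], paired with ['neg', 'pos']
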
spaces/Arsenii2023/Demo1/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Demo1
- emoji: 🏆
- colorFrom: pink
- colorTo: blue
- sdk: gradio
- sdk_version: 3.47.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Artples/LLaMA-2-CHAT/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: LLaMA-2-CHAT
- emoji: ⚡
- colorFrom: gray
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.37.0
- app_file: app.py
- pinned: true
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/__main__.py DELETED
@@ -1,31 +0,0 @@
- import os
- import sys
- import warnings
-
- # Remove '' and current working directory from the first entry
- # of sys.path, if present, to avoid using the current directory
- # in pip commands check, freeze, install, list and show,
- # when invoked as python -m pip <command>
- if sys.path[0] in ("", os.getcwd()):
-     sys.path.pop(0)
-
- # If we are running from a wheel, add the wheel to sys.path
- # This allows the usage python pip-*.whl/pip install pip-*.whl
- if __package__ == "":
-     # __file__ is pip-*.whl/pip/__main__.py
-     # first dirname call strips off '/__main__.py', second strips off '/pip'
-     # Resulting path is the name of the wheel itself
-     # Add that to sys.path so we can import pip
-     path = os.path.dirname(os.path.dirname(__file__))
-     sys.path.insert(0, path)
-
- if __name__ == "__main__":
-     # Work around the error reported in #9540, pending a proper fix.
-     # Note: It is essential the warning filter is set *before* importing
-     # pip, as the deprecation happens at import time, not runtime.
-     warnings.filterwarnings(
-         "ignore", category=DeprecationWarning, module=".*packaging\\.version"
-     )
-     from pip._internal.cli.main import main as _main
-
-     sys.exit(_main())
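
This shim is what makes `python -m pip ...` work: the interpreter executes pip/__main__.py, which cleans sys.path and then hands off to pip's CLI main. A small way to exercise that entry point from Python itself:

# Sketch: invoke pip through the -m entry point shown above.
import subprocess
import sys

# Runs pip/__main__.py in a child interpreter; --version is a real pip flag.
subprocess.run([sys.executable, "-m", "pip", "--version"], check=True)
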
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_modeling.py DELETED
@@ -1,419 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- import functools
- import io
- import struct
- import types
- import torch
-
- from detectron2.modeling import meta_arch
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.modeling.roi_heads import keypoint_head
- from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
-
- from .c10 import Caffe2Compatible
- from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
- from .shared import (
-     alias,
-     check_set_pb_arg,
-     get_pb_arg_floats,
-     get_pb_arg_valf,
-     get_pb_arg_vali,
-     get_pb_arg_vals,
-     mock_torch_nn_functional_interpolate,
- )
-
-
- def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
-     """
-     A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
-     to detectron2's format (i.e. list of Instances instance).
-     This only works when the model follows the Caffe2 detectron's naming convention.
-
-     Args:
-         image_sizes (List[List[int, int]]): [H, W] of every image.
-         tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
-
-         force_mask_on (Bool): if true, it makes sure there'll be pred_masks even
-             if the mask is not found from tensor_outputs (usually due to model crash)
-     """
-
-     results = [Instances(image_size) for image_size in image_sizes]
-
-     batch_splits = tensor_outputs.get("batch_splits", None)
-     if batch_splits:
-         raise NotImplementedError()
-     assert len(image_sizes) == 1
-     result = results[0]
-
-     bbox_nms = tensor_outputs["bbox_nms"]
-     score_nms = tensor_outputs["score_nms"]
-     class_nms = tensor_outputs["class_nms"]
-     # Detection will always succeed because Conv supports 0-batch
-     assert bbox_nms is not None
-     assert score_nms is not None
-     assert class_nms is not None
-     if bbox_nms.shape[1] == 5:
-         result.pred_boxes = RotatedBoxes(bbox_nms)
-     else:
-         result.pred_boxes = Boxes(bbox_nms)
-     result.scores = score_nms
-     result.pred_classes = class_nms.to(torch.int64)
-
-     mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
-     if mask_fcn_probs is not None:
-         # finish the mask pred
-         mask_probs_pred = mask_fcn_probs
-         num_masks = mask_probs_pred.shape[0]
-         class_pred = result.pred_classes
-         indices = torch.arange(num_masks, device=class_pred.device)
-         mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
-         result.pred_masks = mask_probs_pred
-     elif force_mask_on:
-         # NOTE: there's no way to know the height/width of mask here, it won't be
-         # used anyway when batch size is 0, so just set them to 0.
-         result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
-
-     keypoints_out = tensor_outputs.get("keypoints_out", None)
-     kps_score = tensor_outputs.get("kps_score", None)
-     if keypoints_out is not None:
-         # keypoints_out: [N, 4, #keypoints], where 4 is in order of (x, y, score, prob)
-         keypoints_tensor = keypoints_out
-         # NOTE: it's possible that prob is not calculated if "should_output_softmax"
-         # is set to False in HeatmapMaxKeypoint, so just using raw score, seems
-         # it doesn't affect mAP. TODO: check more carefully.
-         keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
-         result.pred_keypoints = keypoint_xyp
-     elif kps_score is not None:
-         # keypoint heatmap to sparse data structure
-         pred_keypoint_logits = kps_score
-         keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
-
-     return results
-
-
- def _cast_to_f32(f64):
-     return struct.unpack("f", struct.pack("f", f64))[0]
-
-
- def set_caffe2_compatible_tensor_mode(model, enable=True):
-     def _fn(m):
-         if isinstance(m, Caffe2Compatible):
-             m.tensor_mode = enable
-
-     model.apply(_fn)
-
-
- def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
-     """
-     See get_caffe2_inputs() below.
-     """
-     assert all(isinstance(x, dict) for x in batched_inputs)
-     assert all(x["image"].dim() == 3 for x in batched_inputs)
-
-     images = [x["image"] for x in batched_inputs]
-     images = ImageList.from_tensors(images, size_divisibility)
-
-     im_info = []
-     for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
-         target_height = input_per_image.get("height", image_size[0])
-         target_width = input_per_image.get("width", image_size[1])  # noqa
-         # NOTE: The scale inside im_info is kept as convention and for providing
-         # post-processing information if further processing is needed. For
-         # current Caffe2 model definitions that don't include post-processing inside
-         # the model, this number is not used.
-         # NOTE: There can be a slight difference between width and height
-         # scales, using a single number can result in numerical difference
-         # compared with D2's post-processing.
-         scale = target_height / image_size[0]
-         im_info.append([image_size[0], image_size[1], scale])
-     im_info = torch.Tensor(im_info)
-
-     return images.tensor.to(device), im_info.to(device)
-
-
- class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
-     """
-     Base class for caffe2-compatible implementation of a meta architecture.
-     The forward is traceable and its traced graph can be converted to caffe2
-     graph through ONNX.
-     """
-
-     def __init__(self, cfg, torch_model):
-         """
-         Args:
-             cfg (CfgNode):
-             torch_model (nn.Module): the detectron2 model (meta_arch) to be
-                 converted.
-         """
-         super().__init__()
-         self._wrapped_model = torch_model
-         self.eval()
-         set_caffe2_compatible_tensor_mode(self, True)
-
-     def get_caffe2_inputs(self, batched_inputs):
-         """
-         Convert pytorch-style structured inputs to caffe2-style inputs that
-         are tuples of tensors.
-
-         Args:
-             batched_inputs (list[dict]): inputs to a detectron2 model
-                 in its standard format. Each dict has "image" (CHW tensor), and optionally
-                 "height" and "width".
-
-         Returns:
-             tuple[Tensor]:
-                 tuple of tensors that will be the inputs to the
-                 :meth:`forward` method. For existing models, the first
-                 is an NCHW tensor (padded and batched); the second is
-                 a im_info Nx3 tensor, where the rows are
-                 (height, width, unused legacy parameter)
-         """
-         return convert_batched_inputs_to_c2_format(
-             batched_inputs,
-             self._wrapped_model.backbone.size_divisibility,
-             self._wrapped_model.device,
-         )
-
-     def encode_additional_info(self, predict_net, init_net):
-         """
-         Save extra metadata that will be used by inference in the output protobuf.
-         """
-         pass
-
-     def forward(self, inputs):
-         """
-         Run the forward in caffe2-style. It has to use caffe2-compatible ops
-         and the method will be used for tracing.
-
-         Args:
-             inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`.
-                 They will be the inputs of the converted caffe2 graph.
-
-         Returns:
-             tuple[Tensor]: output tensors. They will be the outputs of the
-                 converted caffe2 graph.
-         """
-         raise NotImplementedError
-
-     def _caffe2_preprocess_image(self, inputs):
-         """
-         Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
-         It normalizes the input images, and the final caffe2 graph assumes the
-         inputs have been batched already.
-         """
-         data, im_info = inputs
-         data = alias(data, "data")
-         im_info = alias(im_info, "im_info")
-         mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
-         normalized_data = (data - mean) / std
-         normalized_data = alias(normalized_data, "normalized_data")
-
-         # Pack (data, im_info) into ImageList which is recognized by self.inference.
-         images = ImageList(tensor=normalized_data, image_sizes=im_info)
-         return images
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         """
-         Creates a function that converts outputs of the caffe2 model to
-         detectron2's standard format.
-         The function uses information in `predict_net` and `init_net` that are
-         available at inference time. Therefore the function logic can be used in inference.
-
-         The returned function has the following signature:
-
-             def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
-
-         Where
-
-             * batched_inputs (list[dict]): the original input format of the meta arch
-             * c2_inputs (tuple[Tensor]): the caffe2 inputs.
-             * c2_results (dict[str, Tensor]): the caffe2 output format,
-                 corresponding to the outputs of the :meth:`forward` function.
-             * detectron2_outputs: the original output format of the meta arch.
-
-         This function can be used to compare the outputs of the original meta arch and
-         the converted caffe2 graph.
-
-         Returns:
-             callable: a callable of the above signature.
-         """
-         raise NotImplementedError
-
-
- class Caffe2GeneralizedRCNN(Caffe2MetaArch):
-     def __init__(self, cfg, torch_model):
-         assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
-         torch_model = patch_generalized_rcnn(torch_model)
-         super().__init__(cfg, torch_model)
-
-         try:
-             use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
-         except AttributeError:
-             use_heatmap_max_keypoint = False
-         self.roi_heads_patcher = ROIHeadsPatcher(
-             self._wrapped_model.roi_heads, use_heatmap_max_keypoint
-         )
-
-     def encode_additional_info(self, predict_net, init_net):
-         size_divisibility = self._wrapped_model.backbone.size_divisibility
-         check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
-         check_set_pb_arg(
-             predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
-         )
-         check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
-
-     @mock_torch_nn_functional_interpolate()
-     def forward(self, inputs):
-         if not self.tensor_mode:
-             return self._wrapped_model.inference(inputs)
-         images = self._caffe2_preprocess_image(inputs)
-         features = self._wrapped_model.backbone(images.tensor)
-         proposals, _ = self._wrapped_model.proposal_generator(images, features)
-         with self.roi_heads_patcher.mock_roi_heads():
-             detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
-         return tuple(detector_results[0].flatten())
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         def f(batched_inputs, c2_inputs, c2_results):
-             _, im_info = c2_inputs
-             image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
-             results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
-             return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
-         return f
-
-
- class Caffe2RetinaNet(Caffe2MetaArch):
-     def __init__(self, cfg, torch_model):
-         assert isinstance(torch_model, meta_arch.RetinaNet)
-         super().__init__(cfg, torch_model)
-
-     @mock_torch_nn_functional_interpolate()
-     def forward(self, inputs):
-         assert self.tensor_mode
-         images = self._caffe2_preprocess_image(inputs)
-
-         # explicitly return the images sizes to avoid removing "im_info" by ONNX
-         # since it's not used in the forward path
-         return_tensors = [images.image_sizes]
-
-         features = self._wrapped_model.backbone(images.tensor)
-         features = [features[f] for f in self._wrapped_model.head_in_features]
-         for i, feature_i in enumerate(features):
-             features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
-             return_tensors.append(features[i])
-
-         pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
-         for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
-             return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
-             return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
-
-         return tuple(return_tensors)
-
-     def encode_additional_info(self, predict_net, init_net):
-         size_divisibility = self._wrapped_model.backbone.size_divisibility
-         check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
-         check_set_pb_arg(
-             predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
-         )
-         check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
-
-         # Inference parameters:
-         check_set_pb_arg(
-             predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
-         )
-         check_set_pb_arg(
-             predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
-         )
-         check_set_pb_arg(
-             predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
-         )
-         check_set_pb_arg(
-             predict_net,
-             "max_detections_per_image",
-             "i",
-             self._wrapped_model.max_detections_per_image,
-         )
-
-         check_set_pb_arg(
-             predict_net,
-             "bbox_reg_weights",
-             "floats",
-             [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
-         )
-         self._encode_anchor_generator_cfg(predict_net)
-
-     def _encode_anchor_generator_cfg(self, predict_net):
-         # serialize anchor_generator for future use
-         serialized_anchor_generator = io.BytesIO()
-         torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
-         # Ideally we can put anchor generating inside the model, then we don't
-         # need to store this information.
-         bytes = serialized_anchor_generator.getvalue()
-         check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         self = types.SimpleNamespace()
-         serialized_anchor_generator = io.BytesIO(
-             get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
-         )
-         self.anchor_generator = torch.load(serialized_anchor_generator)
-         bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
-         self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
-         self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
-         self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
-         self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
-         self.max_detections_per_image = get_pb_arg_vali(
-             predict_net, "max_detections_per_image", None
-         )
-
-         # hack to reuse inference code from RetinaNet
-         for meth in [
-             "forward_inference",
-             "inference_single_image",
-             "_transpose_dense_predictions",
-             "_decode_multi_level_predictions",
-             "_decode_per_level_predictions",
-         ]:
-             setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))
-
-         def f(batched_inputs, c2_inputs, c2_results):
-             _, im_info = c2_inputs
-             image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
-             dummy_images = ImageList(
-                 torch.randn(
-                     (
-                         len(im_info),
-                         3,
-                     )
-                     + tuple(image_sizes[0])
-                 ),
-                 image_sizes,
-             )
-
-             num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
-             pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
-             pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
-
-             # For each feature level, feature should have the same batch size and
-             # spatial dimension as the box_cls and box_delta.
-             dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
-             # self.num_classes can be inferred
-             self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)
-
-             results = self.forward_inference(
-                 dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
-             )
-             return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
-         return f
-
-
- META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
-     "GeneralizedRCNN": Caffe2GeneralizedRCNN,
-     "RetinaNet": Caffe2RetinaNet,
- }
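
The map at the bottom is how callers pick the caffe2-compatible wrapper for a given META_ARCHITECTURE. A rough sketch of that dispatch, assuming cfg and torch_model come from a normal detectron2 setup (tracing and protobuf export are omitted):

# Sketch: wrap a detectron2 model in its caffe2-compatible counterpart.
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP

def wrap_for_caffe2(cfg, torch_model):
    # cfg.MODEL.META_ARCHITECTURE is e.g. "GeneralizedRCNN" or "RetinaNet"
    c2_compatible_cls = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
    return c2_compatible_cls(cfg, torch_model)
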
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
spaces/Bart92/RVC_HF/infer/modules/train/extract/extract_f0_rmvpe.py DELETED
@@ -1,141 +0,0 @@
1
- import os
2
- import sys
3
- import traceback
4
-
5
- import parselmouth
6
-
7
- now_dir = os.getcwd()
8
- sys.path.append(now_dir)
9
- import logging
10
-
11
- import numpy as np
12
- import pyworld
13
-
14
- from infer.lib.audio import load_audio
15
-
16
- logging.getLogger("numba").setLevel(logging.WARNING)
17
-
18
- n_part = int(sys.argv[1])
19
- i_part = int(sys.argv[2])
20
- i_gpu = sys.argv[3]
21
- os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
22
- exp_dir = sys.argv[4]
23
- is_half = sys.argv[5]
24
- f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
25
-
26
-
27
- def printt(strr):
28
- print(strr)
29
- f.write("%s\n" % strr)
30
- f.flush()
31
-
32
-
33
- class FeatureInput(object):
34
- def __init__(self, samplerate=16000, hop_size=160):
35
- self.fs = samplerate
36
- self.hop = hop_size
37
-
38
- self.f0_bin = 256
39
- self.f0_max = 1100.0
40
- self.f0_min = 50.0
41
- self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
42
- self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
43
-
44
- def compute_f0(self, path, f0_method):
45
- x = load_audio(path, self.fs)
46
- # p_len = x.shape[0] // self.hop
47
- if f0_method == "rmvpe":
48
- if not hasattr(self, "model_rmvpe"):
49
- from infer.lib.rmvpe import RMVPE
50
-
51
- print("Loading rmvpe model")
52
- self.model_rmvpe = RMVPE(
53
- "assets/rmvpe/rmvpe.pt", is_half=is_half, device="cuda"
54
- )
55
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
56
- return f0
57
-
58
- def coarse_f0(self, f0):
59
- f0_mel = 1127 * np.log(1 + f0 / 700)
60
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
61
- self.f0_bin - 2
62
- ) / (self.f0_mel_max - self.f0_mel_min) + 1
63
-
64
- # use 0 or 1
65
- f0_mel[f0_mel <= 1] = 1
66
- f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
67
- f0_coarse = np.rint(f0_mel).astype(int)
68
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
69
- f0_coarse.max(),
70
- f0_coarse.min(),
71
- )
72
- return f0_coarse
73
-
74
- def go(self, paths, f0_method):
75
- if len(paths) == 0:
76
- printt("no-f0-todo")
77
- else:
78
- printt("todo-f0-%s" % len(paths))
79
- n = max(len(paths) // 5, 1)  # each process prints at most 5 progress messages
80
- for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths):
81
- try:
82
- if idx % n == 0:
83
- printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path))
84
- if (
85
- os.path.exists(opt_path1 + ".npy")
86
- and os.path.exists(opt_path2 + ".npy")
87
- ):
88
- continue
89
- featur_pit = self.compute_f0(inp_path, f0_method)
90
- np.save(
91
- opt_path2,
92
- featur_pit,
93
- allow_pickle=False,
94
- ) # nsf
95
- coarse_pit = self.coarse_f0(featur_pit)
96
- np.save(
97
- opt_path1,
98
- coarse_pit,
99
- allow_pickle=False,
100
- ) # ori
101
- except Exception:
102
- printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc()))
103
-
104
-
105
- if __name__ == "__main__":
106
- # exp_dir=r"E:\codes\py39\dataset\mi-test"
107
- # n_p=16
108
- # f = open("%s/log_extract_f0.log"%exp_dir, "w")
109
- printt(sys.argv)
110
- featureInput = FeatureInput()
111
- paths = []
112
- inp_root = "%s/1_16k_wavs" % (exp_dir)
113
- opt_root1 = "%s/2a_f0" % (exp_dir)
114
- opt_root2 = "%s/2b-f0nsf" % (exp_dir)
115
-
116
- os.makedirs(opt_root1, exist_ok=True)
117
- os.makedirs(opt_root2, exist_ok=True)
118
- for name in sorted(list(os.listdir(inp_root))):
119
- inp_path = "%s/%s" % (inp_root, name)
120
- if "spec" in inp_path:
121
- continue
122
- opt_path1 = "%s/%s" % (opt_root1, name)
123
- opt_path2 = "%s/%s" % (opt_root2, name)
124
- paths.append([inp_path, opt_path1, opt_path2])
125
- try:
126
- featureInput.go(paths[i_part::n_part], "rmvpe")
127
- except Exception:
128
- printt("f0_all_fail-%s" % (traceback.format_exc()))
129
- # ps = []
130
- # for i in range(n_p):
131
- # p = Process(
132
- # target=featureInput.go,
133
- # args=(
134
- # paths[i::n_p],
135
- # f0method,
136
- # ),
137
- # )
138
- # ps.append(p)
139
- # p.start()
140
- # for i in range(n_p):
141
- # ps[i].join()
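The coarse_f0 method above quantizes pitch onto 255 mel-spaced bins, keeping values in [1, 255]. A standalone sketch of the same mapping, reusing the constants from the deleted class (the sample f0 values are made up):

import numpy as np

f0_min, f0_max, f0_bin = 50.0, 1100.0, 256
mel_min = 1127 * np.log(1 + f0_min / 700)
mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 100.0, 440.0, 1000.0])  # Hz; 0.0 marks an unvoiced frame
f0_mel = 1127 * np.log(1 + f0 / 700)
# rescale voiced frames into (1, f0_bin - 1]; unvoiced frames are floored to 1
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - mel_min) * (f0_bin - 2) / (mel_max - mel_min) + 1
f0_mel = np.clip(f0_mel, 1, f0_bin - 1)
print(np.rint(f0_mel).astype(int))  # bin indices in [1, 255]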
 
spaces/Benson/text-generation/Examples/Descarga De La Red M.hollywoodbets.net.md DELETED
@@ -1,104 +0,0 @@
1
- <br />
2
- <h1>How to Download and Use m.hollywoodbets.net</h1>
3
- <p>If you are looking for a convenient and fast way to bet on sports, horse racing, casino games and more, you may want to download and use m.hollywoodbets.net. This is the mobile version of Hollywoodbets, one of the most popular online betting platforms in South Africa. In this article, we will show you what m.hollywoodbets.net is, why you should download it, how to download it, how to use it, and how to solve some common problems with it.</p>
4
- <h2>m.hollywoodbets.net download</h2><br /><p><b><b>Download</b> &#9658; <a href="https://bltlly.com/2v6JvX">https://bltlly.com/2v6JvX</a></b></p><br /><br />
5
- <h2>What is m.hollywoodbets.net?</h2>
6
- <p>m.hollywoodbets.net is the mobile site of Hollywoodbets, a licensed betting operator that offers a wide range of betting options across various sports and events. You can bet on football, rugby, cricket, tennis, golf, basketball and so on. You can also bet on horse racing from South Africa and other countries. In addition, you can play casino games, slots, lucky numbers, betgames, live games and more. You can access all of these features from your mobile device using m.hollywoodbets.net.</p>
7
- <h2>Why download m.hollywoodbets.net?</h2>
8
- <p>There are many benefits to downloading and using m.hollywoodbets.net. Here are some of them:</p>
9
- <ul>
10
- <li><strong>Convenience:</strong> You can bet anytime, anywhere using your mobile device. You don't need a computer or a browser to access the site. You can simply tap the app icon and start betting.</li>
11
- <li><strong>Speed:</strong> The mobile site is optimized for fast loading and smooth performance. You can place your bets quickly and easily without any lag or glitches.</li>
12
- <li><strong>Data-free access:</strong> You can access the mobile site without using any data. Hollywoodbets has partnered with several network providers to offer data-free access to its customers. You can check whether your network provider is supported by visiting [1](https://sport.hollywoodbets.net/).</li>
13
-
14
- </ul>
15
- <h2>How to download m.hollywoodbets.net?</h2>
16
- <p>If you have an Android device, you can download and install the app for m.hollywoodbets.net by following these steps:</p>
17
- <ol>
18
- <li>Visit [1](https://sport.hollywoodbets.net/) from your mobile browser and log in to your account. If you don't have an account yet, you can register one by clicking on "Join Now".</li>
19
- <li>Scroll down to the bottom of the page and click on "Basic Feature Phone App". This will redirect you to a site where you can download the app.</li>
20
- <li>Click on "Download Android App" and wait for the download to complete.</li>
21
- <li>Go to your security settings and allow installation from unknown sources.</li>
22
- <li>Open the downloaded file and install the app on your device.</li>
23
- </ol>
24
- <p>Note that there is no official app for iOS devices, so you will have to use the mobile browser version if you have an iPhone or iPad.</p>
25
- <h2>How to use m.hollywoodbets.net?</h2>
26
- <p>Using m.hollywoodbets.net is easy and simple. Here are some basic steps to get started:</p>
27
- <p></p>
28
- <ol>
29
- <li>Log in to your account using your username and password. If you forgot your password, you can reset it by clicking on "Forgot Password".</li>
30
- <li>Choose the betting category you want, such as sports, horse racing, casino, etc. You can use the menu icon in the top left corner to navigate between the different categories.</li>
31
- <li>Select the event or game you want to bet on. You can use the search bar or the filters to find what you are looking for.</li>
32
- <li>Choose the market and the odds you want to bet on. You can tap the odds to add them to your betslip.</li>
33
- <li>Enter the amount you want to stake and confirm your bet. You can also use the "Quick Bet" feature to place your bet faster.</li>
34
- <li>Review your betting history and balance by clicking on "My Account". You can also view your pending bets, settled bets and open bets.</li>
35
- </ol>
36
-
37
- <p>To deposit and withdraw money using m.hollywoodbets.net, you need a verified account and a valid bank account or card. Here are some methods you can use:</p>
38
- <table>
39
- <tr><th>Method</th><th>Deposit</th><th>Withdrawal</th></tr>
40
- <tr><td>Bank transfer</td><td>Yes</td><td>Yes</td></tr>
41
- <tr><td>Credit/debit card</td><td>Yes</td><td>No</td></tr>
42
- <tr><td>EFT</td><td>Yes</td><td>No</td></tr>
43
- <tr><td>Ozow</td><td>Yes</td><td>No</td></tr>
44
- <tr><td>Peach Payments</td><td>Yes</td><td>No</td></tr>
45
- <tr><td>Zapper</td><td>Yes</td><td>No</td></tr>
46
- <tr><td>Voucher</td><td>Yes</td><td>No</td></tr>
47
- <tr><td>Hollywoodbets branches</td><td>Yes</td><td>Yes</td></tr>
48
- <tr><td>Hollywoodbets ATM card</td><td>No</td><td>Yes</td></tr>
49
- <tr><td>Hollywoodbets eWallet (FNB)</td><td>No</td><td>Yes</td></tr>
50
- <tr><td>Hollywoodbets Instant Money (Standard Bank)</td><td>No</td><td>Yes</td></tr>
51
- <tr><td>Hollywoodbets Cash Send (Absa)</td><td>No</td><td>Yes</td></tr>
52
- <tr><td>Hollywoodbets Cash Send Plus (Nedbank)</td><td>No</td><td>Yes</td></tr>
53
- </table>
54
- <p>To make a deposit, you can follow these steps:</p>
55
- <ol>
56
- <li>Log in to your account and click on "Deposit".</li>
57
- <li>Select the method you want to use and enter the amount you want to deposit.</li>
58
- <li>Follow the on-screen instructions to complete the transaction.</li>
59
- <li>Wait for the confirmation message and check your balance.</li>
60
- </ol>
61
- <p>To make a withdrawal, you can follow these steps:</p>
62
- <ol>
63
- <li>Log in to your account and click on "Withdraw".</li>
64
- <li>Select the method you want to use and enter the amount you want to withdraw.</li>
65
- <li>Enter your bank account or card details if required.</li>
66
- <li>Confirm your request and wait for approval.</li>
67
- <li>Check your bank account or card statement for the funds.</li>
68
- </ol>
69
- <h2>How to contact customer support using m.hollywoodbets.net?</h2>
70
-
71
- <ul>
72
- <li><strong>Live Chat:</strong> You can use the live chat feature on the mobile site to chat with a friendly and helpful agent. You can access the live chat by clicking on the "Help" icon in the bottom right corner of the screen.</li>
73
- <li><strong>Email:</strong> You can send an email to [email protected] or [email protected] and expect a response within 24 hours.</li>
74
- <li><strong>Phone:</strong> You can call the toll-free number 08600 42387 or the alternative number 087 353 7634 and speak to a representative.</li>
75
- <li><strong>Social media:</strong> You can follow Hollywoodbets on Facebook, Twitter, Instagram, YouTube and Telegram and send them a message or comment on their posts.</li>
76
- </ul>
77
- <h2>Common problems with m.hollywoodbets.net and how to solve them</h2>
78
- <p>While m.hollywoodbets.net is designed to provide a smooth, trouble-free betting experience, you may run into issues from time to time. Here are some of the common problems and how to solve them:</p>
79
- <ul>
80
- <li><strong>Login errors:</strong> If you cannot log in to your account, you may have entered the wrong username or password, or your account may be locked for inactivity or security reasons. To solve this, you can try resetting your password by clicking on "Forgot Password", or contact customer support to unlock your account.</li>
81
- <li><strong>Account inactivity:</strong> If you have not used your account for more than 90 days, it may be deactivated due to inactivity. To reactivate it, you need to contact customer support and provide them with your FICA documents (proof of identity and address).</li>
82
-
83
- <li><strong>FICA verification:</strong> FICA stands for the Financial Intelligence Centre Act, a law that requires all betting operators to verify the identity and address of their customers. To comply with this law, you must submit your FICA documents (proof of identity and address) when you register an account or make a withdrawal. You can upload your documents online by clicking on "My Account" and "FICA", or email them to [email protected]</li>
84
- </ul>
85
- <h2>Conclusion</h2>
86
- <p>m.hollywoodbets.net is a great way to enjoy online betting on your mobile device. You can download and use it easily and access a variety of betting options, promotions and features. You can also deposit and withdraw money securely and contact customer support conveniently. If you run into any problems with the mobile site, you can follow the tips above or reach out to customer support for help. So what are you waiting for? Download m.hollywoodbets.net today and start betting!</p>
87
- <h2>Frequently Asked Questions</h2>
88
- <h3>Is m.hollywoodbets.net safe and legal?</h3>
89
- <p>Yes, m.hollywoodbets.net is safe and legal. Hollywoodbets is licensed by the Western Cape Gambling and Racing Board and adheres to strict security standards. All transactions are encrypted and protected by SSL technology. All personal information is kept confidential and is not shared with third parties.</p>
90
- <h3>What are the minimum and maximum bets on m.hollywoodbets.net?</h3><p>The minimum bet on m.hollywoodbets.net is R1, while the maximum bet depends on the event and the market you are betting on. You can check the maximum bet by clicking on "Max Bet" on your betslip.</p>
91
- <h3>How can I get free bets on m.hollywoodbets.net?</h3>
92
- <p>There are several ways to get free bets on m.hollywoodbets.net. Some of them are:</p>
93
- <ul>
94
- <li>Registering a new account and getting an R25 sign-up bonus.</li>
95
- <li>Referring a friend and getting an R50 bonus for every successful referral.</li>
96
-
97
- <li>Using vouchers, which you can buy at selected retailers or receive from customer support.</li>
98
- </ul>
99
- <h3>How can I check my bet results on m.hollywoodbets.net?</h3>
100
- <p>You can check your bet results on m.hollywoodbets.net by clicking on "My Account" and "Bet History". You can also use the "Results" feature on the mobile site to check the results of various events and games.</p>
101
- <h3>How can I update my personal details on m.hollywoodbets.net?</h3>
102
- <p>You can update your personal details on m.hollywoodbets.net by clicking on "My Account" and "Personal Details". You can change your password, email address, phone number and security question. However, you cannot change your name, surname, date of birth or ID number. If you need to change these details, you must contact customer support and provide them with proof of identity.</p><br />
103
- <br />
104
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/zoneinfo/rebuild.py DELETED
@@ -1,75 +0,0 @@
1
- import logging
2
- import os
3
- import tempfile
4
- import shutil
5
- import json
6
- from subprocess import check_call, check_output
7
- from tarfile import TarFile
8
-
9
- from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
10
-
11
-
12
- def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
13
- """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
14
-
15
- filename is the timezone tarball from ``ftp.iana.org/tz``.
16
-
17
- """
18
- tmpdir = tempfile.mkdtemp()
19
- zonedir = os.path.join(tmpdir, "zoneinfo")
20
- moduledir = os.path.dirname(__file__)
21
- try:
22
- with TarFile.open(filename) as tf:
23
- for name in zonegroups:
24
- tf.extract(name, tmpdir)
25
- filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
26
-
27
- _run_zic(zonedir, filepaths)
28
-
29
- # write metadata file
30
- with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
31
- json.dump(metadata, f, indent=4, sort_keys=True)
32
- target = os.path.join(moduledir, ZONEFILENAME)
33
- with TarFile.open(target, "w:%s" % format) as tf:
34
- for entry in os.listdir(zonedir):
35
- entrypath = os.path.join(zonedir, entry)
36
- tf.add(entrypath, entry)
37
- finally:
38
- shutil.rmtree(tmpdir)
39
-
40
-
41
- def _run_zic(zonedir, filepaths):
42
- """Calls the ``zic`` compiler in a compatible way to get a "fat" binary.
43
-
44
- Recent versions of ``zic`` default to ``-b slim``, while older versions
45
- don't even have the ``-b`` option (but default to "fat" binaries). The
46
- current version of dateutil does not support Version 2+ TZif files, which
47
- causes problems when used in conjunction with "slim" binaries, so this
48
- function is used to ensure that we always get a "fat" binary.
49
- """
50
-
51
- try:
52
- help_text = check_output(["zic", "--help"])
53
- except OSError as e:
54
- _print_on_nosuchfile(e)
55
- raise
56
-
57
- if b"-b " in help_text:
58
- bloat_args = ["-b", "fat"]
59
- else:
60
- bloat_args = []
61
-
62
- check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
63
-
64
-
65
- def _print_on_nosuchfile(e):
66
- """Print helpful troubleshooting message
67
-
68
- e is an exception raised by subprocess.check_call()
69
-
70
- """
71
- if e.errno == 2:
72
- logging.error(
73
- "Could not find zic. Perhaps you need to install "
74
- "libc-bin or some other package that provides it, "
75
- "or it's not in your PATH?")
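_run_zic above feature-probes the installed zic by scanning its --help output before deciding whether to pass -b fat. The same probe-then-invoke pattern works for any CLI whose flags vary across versions; a small sketch, with hypothetical tool and flag names:

from subprocess import check_call, check_output

def call_with_optional_flag(tool, flag, args):
    # pass `flag` only when the tool's --help output advertises it
    help_text = check_output([tool, "--help"])
    extra = [flag] if flag.encode() in help_text else []
    check_call([tool] + extra + args)

# call_with_optional_flag("sometool", "--verbose", ["input.txt"])  # hypothetical tool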
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py DELETED
@@ -1,352 +0,0 @@
1
- """
2
- Metadata about languages used by our model training code for our
3
- SingleByteCharSetProbers. Could be used for other things in the future.
4
-
5
- This code is based on the language metadata from the uchardet project.
6
- """
7
-
8
- from string import ascii_letters
9
- from typing import List, Optional
10
-
11
- # TODO: Add Ukrainian (KOI8-U)
12
-
13
-
14
- class Language:
15
- """Metadata about a language useful for training models
16
-
17
- :ivar name: The human name for the language, in English.
18
- :type name: str
19
- :ivar iso_code: 2-letter ISO 639-1 if possible, 3-letter ISO code otherwise,
20
- or use another catalog as a last resort.
21
- :type iso_code: str
22
- :ivar use_ascii: Whether or not ASCII letters should be included in trained
23
- models.
24
- :type use_ascii: bool
25
- :ivar charsets: The charsets we want to support and create data for.
26
- :type charsets: list of str
27
- :ivar alphabet: The characters in the language's alphabet. If `use_ascii` is
28
- `True`, you only need to add those not in the ASCII set.
29
- :type alphabet: str
30
- :ivar wiki_start_pages: The Wikipedia pages to start from if we're crawling
31
- Wikipedia for training data.
32
- :type wiki_start_pages: list of str
33
- """
34
-
35
- def __init__(
36
- self,
37
- name: Optional[str] = None,
38
- iso_code: Optional[str] = None,
39
- use_ascii: bool = True,
40
- charsets: Optional[List[str]] = None,
41
- alphabet: Optional[str] = None,
42
- wiki_start_pages: Optional[List[str]] = None,
43
- ) -> None:
44
- super().__init__()
45
- self.name = name
46
- self.iso_code = iso_code
47
- self.use_ascii = use_ascii
48
- self.charsets = charsets
49
- if self.use_ascii:
50
- if alphabet:
51
- alphabet += ascii_letters
52
- else:
53
- alphabet = ascii_letters
54
- elif not alphabet:
55
- raise ValueError("Must supply alphabet if use_ascii is False")
56
- self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None
57
- self.wiki_start_pages = wiki_start_pages
58
-
59
- def __repr__(self) -> str:
60
- param_str = ", ".join(
61
- f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_")
62
- )
63
- return f"{self.__class__.__name__}({param_str})"
64
-
65
-
66
- LANGUAGES = {
67
- "Arabic": Language(
68
- name="Arabic",
69
- iso_code="ar",
70
- use_ascii=False,
71
- # We only support encodings that use isolated
72
- # forms, because the current recommendation is
73
- # that the rendering system handles presentation
74
- # forms. This means we purposefully skip IBM864.
75
- charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"],
76
- alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ",
77
- wiki_start_pages=["الصفحة_الرئيسية"],
78
- ),
79
- "Belarusian": Language(
80
- name="Belarusian",
81
- iso_code="be",
82
- use_ascii=False,
83
- charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"],
84
- alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ",
85
- wiki_start_pages=["Галоўная_старонка"],
86
- ),
87
- "Bulgarian": Language(
88
- name="Bulgarian",
89
- iso_code="bg",
90
- use_ascii=False,
91
- charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"],
92
- alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
93
- wiki_start_pages=["Начална_страница"],
94
- ),
95
- "Czech": Language(
96
- name="Czech",
97
- iso_code="cz",
98
- use_ascii=True,
99
- charsets=["ISO-8859-2", "WINDOWS-1250"],
100
- alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ",
101
- wiki_start_pages=["Hlavní_strana"],
102
- ),
103
- "Danish": Language(
104
- name="Danish",
105
- iso_code="da",
106
- use_ascii=True,
107
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
108
- alphabet="æøåÆØÅ",
109
- wiki_start_pages=["Forside"],
110
- ),
111
- "German": Language(
112
- name="German",
113
- iso_code="de",
114
- use_ascii=True,
115
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
116
- alphabet="äöüßẞÄÖÜ",
117
- wiki_start_pages=["Wikipedia:Hauptseite"],
118
- ),
119
- "Greek": Language(
120
- name="Greek",
121
- iso_code="el",
122
- use_ascii=False,
123
- charsets=["ISO-8859-7", "WINDOWS-1253"],
124
- alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ",
125
- wiki_start_pages=["Πύλη:Κύρια"],
126
- ),
127
- "English": Language(
128
- name="English",
129
- iso_code="en",
130
- use_ascii=True,
131
- charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
132
- wiki_start_pages=["Main_Page"],
133
- ),
134
- "Esperanto": Language(
135
- name="Esperanto",
136
- iso_code="eo",
137
- # Q, W, X, and Y not used at all
138
- use_ascii=False,
139
- charsets=["ISO-8859-3"],
140
- alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ",
141
- wiki_start_pages=["Vikipedio:Ĉefpaĝo"],
142
- ),
143
- "Spanish": Language(
144
- name="Spanish",
145
- iso_code="es",
146
- use_ascii=True,
147
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
148
- alphabet="ñáéíóúüÑÁÉÍÓÚÜ",
149
- wiki_start_pages=["Wikipedia:Portada"],
150
- ),
151
- "Estonian": Language(
152
- name="Estonian",
153
- iso_code="et",
154
- use_ascii=False,
155
- charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"],
156
- # C, F, Š, Q, W, X, Y, Z, Ž are only for
157
- # loanwords
158
- alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü",
159
- wiki_start_pages=["Esileht"],
160
- ),
161
- "Finnish": Language(
162
- name="Finnish",
163
- iso_code="fi",
164
- use_ascii=True,
165
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
166
- alphabet="ÅÄÖŠŽåäöšž",
167
- wiki_start_pages=["Wikipedia:Etusivu"],
168
- ),
169
- "French": Language(
170
- name="French",
171
- iso_code="fr",
172
- use_ascii=True,
173
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
174
- alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ",
175
- wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"],
176
- ),
177
- "Hebrew": Language(
178
- name="Hebrew",
179
- iso_code="he",
180
- use_ascii=False,
181
- charsets=["ISO-8859-8", "WINDOWS-1255"],
182
- alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
183
- wiki_start_pages=["עמוד_ראשי"],
184
- ),
185
- "Croatian": Language(
186
- name="Croatian",
187
- iso_code="hr",
188
- # Q, W, X, Y are only used for foreign words.
189
- use_ascii=False,
190
- charsets=["ISO-8859-2", "WINDOWS-1250"],
191
- alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ",
192
- wiki_start_pages=["Glavna_stranica"],
193
- ),
194
- "Hungarian": Language(
195
- name="Hungarian",
196
- iso_code="hu",
197
- # Q, W, X, Y are only used for foreign words.
198
- use_ascii=False,
199
- charsets=["ISO-8859-2", "WINDOWS-1250"],
200
- alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ",
201
- wiki_start_pages=["Kezdőlap"],
202
- ),
203
- "Italian": Language(
204
- name="Italian",
205
- iso_code="it",
206
- use_ascii=True,
207
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
208
- alphabet="ÀÈÉÌÒÓÙàèéìòóù",
209
- wiki_start_pages=["Pagina_principale"],
210
- ),
211
- "Lithuanian": Language(
212
- name="Lithuanian",
213
- iso_code="lt",
214
- use_ascii=False,
215
- charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
216
- # Q, W, and X not used at all
217
- alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž",
218
- wiki_start_pages=["Pagrindinis_puslapis"],
219
- ),
220
- "Latvian": Language(
221
- name="Latvian",
222
- iso_code="lv",
223
- use_ascii=False,
224
- charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
225
- # Q, W, X, Y are only for loanwords
226
- alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž",
227
- wiki_start_pages=["Sākumlapa"],
228
- ),
229
- "Macedonian": Language(
230
- name="Macedonian",
231
- iso_code="mk",
232
- use_ascii=False,
233
- charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
234
- alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш",
235
- wiki_start_pages=["Главна_страница"],
236
- ),
237
- "Dutch": Language(
238
- name="Dutch",
239
- iso_code="nl",
240
- use_ascii=True,
241
- charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
242
- wiki_start_pages=["Hoofdpagina"],
243
- ),
244
- "Polish": Language(
245
- name="Polish",
246
- iso_code="pl",
247
- # Q and X are only used for foreign words.
248
- use_ascii=False,
249
- charsets=["ISO-8859-2", "WINDOWS-1250"],
250
- alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż",
251
- wiki_start_pages=["Wikipedia:Strona_główna"],
252
- ),
253
- "Portuguese": Language(
254
- name="Portuguese",
255
- iso_code="pt",
256
- use_ascii=True,
257
- charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
258
- alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú",
259
- wiki_start_pages=["Wikipédia:Página_principal"],
260
- ),
261
- "Romanian": Language(
262
- name="Romanian",
263
- iso_code="ro",
264
- use_ascii=True,
265
- charsets=["ISO-8859-2", "WINDOWS-1250"],
266
- alphabet="ăâîșțĂÂÎȘȚ",
267
- wiki_start_pages=["Pagina_principală"],
268
- ),
269
- "Russian": Language(
270
- name="Russian",
271
- iso_code="ru",
272
- use_ascii=False,
273
- charsets=[
274
- "ISO-8859-5",
275
- "WINDOWS-1251",
276
- "KOI8-R",
277
- "MacCyrillic",
278
- "IBM866",
279
- "IBM855",
280
- ],
281
- alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
282
- wiki_start_pages=["Заглавная_страница"],
283
- ),
284
- "Slovak": Language(
285
- name="Slovak",
286
- iso_code="sk",
287
- use_ascii=True,
288
- charsets=["ISO-8859-2", "WINDOWS-1250"],
289
- alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ",
290
- wiki_start_pages=["Hlavná_stránka"],
291
- ),
292
- "Slovene": Language(
293
- name="Slovene",
294
- iso_code="sl",
295
- # Q, W, X, Y are only used for foreign words.
296
- use_ascii=False,
297
- charsets=["ISO-8859-2", "WINDOWS-1250"],
298
- alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ",
299
- wiki_start_pages=["Glavna_stran"],
300
- ),
301
- # Serbian can be written in both Latin and Cyrillic, but there's no
302
- # simple way to get the Latin alphabet pages from Wikipedia through
303
- # the API, so for now we just support Cyrillic.
304
- "Serbian": Language(
305
- name="Serbian",
306
- iso_code="sr",
307
- alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш",
308
- charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
309
- wiki_start_pages=["Главна_страна"],
310
- ),
311
- "Thai": Language(
312
- name="Thai",
313
- iso_code="th",
314
- use_ascii=False,
315
- charsets=["ISO-8859-11", "TIS-620", "CP874"],
316
- alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
317
- wiki_start_pages=["หน้าหลัก"],
318
- ),
319
- "Turkish": Language(
320
- name="Turkish",
321
- iso_code="tr",
322
- # Q, W, and X are not used by Turkish
323
- use_ascii=False,
324
- charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"],
325
- alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ",
326
- wiki_start_pages=["Ana_Sayfa"],
327
- ),
328
- "Vietnamese": Language(
329
- name="Vietnamese",
330
- iso_code="vi",
331
- use_ascii=False,
332
- # Windows-1258 is the only common 8-bit
333
- # Vietnamese encoding supported by Python.
334
- # From Wikipedia:
335
- # For systems that lack support for Unicode,
336
- # dozens of 8-bit Vietnamese code pages are
337
- # available.[1] The most common are VISCII
338
- # (TCVN 5712:1993), VPS, and Windows-1258.[3]
339
- # Where ASCII is required, such as when
340
- # ensuring readability in plain text e-mail,
341
- # Vietnamese letters are often encoded
342
- # according to Vietnamese Quoted-Readable
343
- # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
344
- # though usage of either variable-width
345
- # scheme has declined dramatically following
346
- # the adoption of Unicode on the World Wide
347
- # Web.
348
- charsets=["WINDOWS-1258"],
349
- alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY",
350
- wiki_start_pages=["Chữ_Quốc_ngữ"],
351
- ),
352
- }
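Note that Language.__init__ above folds ascii_letters into the alphabet when use_ascii is set, then dedupes and sorts the result. That normalization in isolation, as a quick sketch using only the standard library:

from string import ascii_letters

def normalize_alphabet(alphabet=None, use_ascii=True):
    # mirrors Language.__init__: optionally merge ASCII, then dedupe and sort
    if use_ascii:
        alphabet = (alphabet or "") + ascii_letters
    elif not alphabet:
        raise ValueError("Must supply alphabet if use_ascii is False")
    return "".join(sorted(set(alphabet)))

print(normalize_alphabet("äöüßẞÄÖÜ"))              # German entry: ASCII plus umlauts, sorted
print(normalize_alphabet("абв", use_ascii=False))  # -> 'абв'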
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/cookies.py DELETED
@@ -1,561 +0,0 @@
1
- """
2
- requests.cookies
3
- ~~~~~~~~~~~~~~~~
4
-
5
- Compatibility code to be able to use `cookielib.CookieJar` with requests.
6
-
7
- requests.utils imports from here, so be careful with imports.
8
- """
9
-
10
- import calendar
11
- import copy
12
- import time
13
-
14
- from ._internal_utils import to_native_string
15
- from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
16
-
17
- try:
18
- import threading
19
- except ImportError:
20
- import dummy_threading as threading
21
-
22
-
23
- class MockRequest:
24
- """Wraps a `requests.Request` to mimic a `urllib2.Request`.
25
-
26
- The code in `cookielib.CookieJar` expects this interface in order to correctly
27
- manage cookie policies, i.e., determine whether a cookie can be set, given the
28
- domains of the request and the cookie.
29
-
30
- The original request object is read-only. The client is responsible for collecting
31
- the new headers via `get_new_headers()` and interpreting them appropriately. You
32
- probably want `get_cookie_header`, defined below.
33
- """
34
-
35
- def __init__(self, request):
36
- self._r = request
37
- self._new_headers = {}
38
- self.type = urlparse(self._r.url).scheme
39
-
40
- def get_type(self):
41
- return self.type
42
-
43
- def get_host(self):
44
- return urlparse(self._r.url).netloc
45
-
46
- def get_origin_req_host(self):
47
- return self.get_host()
48
-
49
- def get_full_url(self):
50
- # Only return the response's URL if the user hadn't set the Host
51
- # header
52
- if not self._r.headers.get("Host"):
53
- return self._r.url
54
- # If they did set it, retrieve it and reconstruct the expected domain
55
- host = to_native_string(self._r.headers["Host"], encoding="utf-8")
56
- parsed = urlparse(self._r.url)
57
- # Reconstruct the URL as we expect it
58
- return urlunparse(
59
- [
60
- parsed.scheme,
61
- host,
62
- parsed.path,
63
- parsed.params,
64
- parsed.query,
65
- parsed.fragment,
66
- ]
67
- )
68
-
69
- def is_unverifiable(self):
70
- return True
71
-
72
- def has_header(self, name):
73
- return name in self._r.headers or name in self._new_headers
74
-
75
- def get_header(self, name, default=None):
76
- return self._r.headers.get(name, self._new_headers.get(name, default))
77
-
78
- def add_header(self, key, val):
79
- """cookielib has no legitimate use for this method; add it back if you find one."""
80
- raise NotImplementedError(
81
- "Cookie headers should be added with add_unredirected_header()"
82
- )
83
-
84
- def add_unredirected_header(self, name, value):
85
- self._new_headers[name] = value
86
-
87
- def get_new_headers(self):
88
- return self._new_headers
89
-
90
- @property
91
- def unverifiable(self):
92
- return self.is_unverifiable()
93
-
94
- @property
95
- def origin_req_host(self):
96
- return self.get_origin_req_host()
97
-
98
- @property
99
- def host(self):
100
- return self.get_host()
101
-
102
-
103
- class MockResponse:
104
- """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
105
-
106
- ...what? Basically, expose the parsed HTTP headers from the server response
107
- the way `cookielib` expects to see them.
108
- """
109
-
110
- def __init__(self, headers):
111
- """Make a MockResponse for `cookielib` to read.
112
-
113
- :param headers: a httplib.HTTPMessage or analogous carrying the headers
114
- """
115
- self._headers = headers
116
-
117
- def info(self):
118
- return self._headers
119
-
120
- def getheaders(self, name):
121
- self._headers.getheaders(name)
122
-
123
-
124
- def extract_cookies_to_jar(jar, request, response):
125
- """Extract the cookies from the response into a CookieJar.
126
-
127
- :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
128
- :param request: our own requests.Request object
129
- :param response: urllib3.HTTPResponse object
130
- """
131
- if not (hasattr(response, "_original_response") and response._original_response):
132
- return
133
- # the _original_response field is the wrapped httplib.HTTPResponse object,
134
- req = MockRequest(request)
135
- # pull out the HTTPMessage with the headers and put it in the mock:
136
- res = MockResponse(response._original_response.msg)
137
- jar.extract_cookies(res, req)
138
-
139
-
140
- def get_cookie_header(jar, request):
141
- """
142
- Produce an appropriate Cookie header string to be sent with `request`, or None.
143
-
144
- :rtype: str
145
- """
146
- r = MockRequest(request)
147
- jar.add_cookie_header(r)
148
- return r.get_new_headers().get("Cookie")
149
-
150
-
151
- def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
152
- """Unsets a cookie by name, by default over all domains and paths.
153
-
154
- Wraps CookieJar.clear(), is O(n).
155
- """
156
- clearables = []
157
- for cookie in cookiejar:
158
- if cookie.name != name:
159
- continue
160
- if domain is not None and domain != cookie.domain:
161
- continue
162
- if path is not None and path != cookie.path:
163
- continue
164
- clearables.append((cookie.domain, cookie.path, cookie.name))
165
-
166
- for domain, path, name in clearables:
167
- cookiejar.clear(domain, path, name)
168
-
169
-
170
- class CookieConflictError(RuntimeError):
171
- """There are two cookies that meet the criteria specified in the cookie jar.
172
- Use .get and .set and include domain and path args in order to be more specific.
173
- """
174
-
175
-
176
- class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
177
- """Compatibility class; is a cookielib.CookieJar, but exposes a dict
178
- interface.
179
-
180
- This is the CookieJar we create by default for requests and sessions that
181
- don't specify one, since some clients may expect response.cookies and
182
- session.cookies to support dict operations.
183
-
184
- Requests does not use the dict interface internally; it's just for
185
- compatibility with external client code. All requests code should work
186
- out of the box with externally provided instances of ``CookieJar``, e.g.
187
- ``LWPCookieJar`` and ``FileCookieJar``.
188
-
189
- Unlike a regular CookieJar, this class is pickleable.
190
-
191
- .. warning:: dictionary operations that are normally O(1) may be O(n).
192
- """
193
-
194
- def get(self, name, default=None, domain=None, path=None):
195
- """Dict-like get() that also supports optional domain and path args in
196
- order to resolve naming collisions from using one cookie jar over
197
- multiple domains.
198
-
199
- .. warning:: operation is O(n), not O(1).
200
- """
201
- try:
202
- return self._find_no_duplicates(name, domain, path)
203
- except KeyError:
204
- return default
205
-
206
- def set(self, name, value, **kwargs):
207
- """Dict-like set() that also supports optional domain and path args in
208
- order to resolve naming collisions from using one cookie jar over
209
- multiple domains.
210
- """
211
- # support client code that unsets cookies by assignment of a None value:
212
- if value is None:
213
- remove_cookie_by_name(
214
- self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
215
- )
216
- return
217
-
218
- if isinstance(value, Morsel):
219
- c = morsel_to_cookie(value)
220
- else:
221
- c = create_cookie(name, value, **kwargs)
222
- self.set_cookie(c)
223
- return c
224
-
225
- def iterkeys(self):
226
- """Dict-like iterkeys() that returns an iterator of names of cookies
227
- from the jar.
228
-
229
- .. seealso:: itervalues() and iteritems().
230
- """
231
- for cookie in iter(self):
232
- yield cookie.name
233
-
234
- def keys(self):
235
- """Dict-like keys() that returns a list of names of cookies from the
236
- jar.
237
-
238
- .. seealso:: values() and items().
239
- """
240
- return list(self.iterkeys())
241
-
242
- def itervalues(self):
243
- """Dict-like itervalues() that returns an iterator of values of cookies
244
- from the jar.
245
-
246
- .. seealso:: iterkeys() and iteritems().
247
- """
248
- for cookie in iter(self):
249
- yield cookie.value
250
-
251
- def values(self):
252
- """Dict-like values() that returns a list of values of cookies from the
253
- jar.
254
-
255
- .. seealso:: keys() and items().
256
- """
257
- return list(self.itervalues())
258
-
259
- def iteritems(self):
260
- """Dict-like iteritems() that returns an iterator of name-value tuples
261
- from the jar.
262
-
263
- .. seealso:: iterkeys() and itervalues().
264
- """
265
- for cookie in iter(self):
266
- yield cookie.name, cookie.value
267
-
268
- def items(self):
269
- """Dict-like items() that returns a list of name-value tuples from the
270
- jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
271
- vanilla python dict of key value pairs.
272
-
273
- .. seealso:: keys() and values().
274
- """
275
- return list(self.iteritems())
276
-
277
- def list_domains(self):
278
- """Utility method to list all the domains in the jar."""
279
- domains = []
280
- for cookie in iter(self):
281
- if cookie.domain not in domains:
282
- domains.append(cookie.domain)
283
- return domains
284
-
285
- def list_paths(self):
286
- """Utility method to list all the paths in the jar."""
287
- paths = []
288
- for cookie in iter(self):
289
- if cookie.path not in paths:
290
- paths.append(cookie.path)
291
- return paths
292
-
293
- def multiple_domains(self):
294
- """Returns True if there are multiple domains in the jar.
295
- Returns False otherwise.
296
-
297
- :rtype: bool
298
- """
299
- domains = []
300
- for cookie in iter(self):
301
- if cookie.domain is not None and cookie.domain in domains:
302
- return True
303
- domains.append(cookie.domain)
304
- return False # there is only one domain in jar
305
-
306
- def get_dict(self, domain=None, path=None):
307
- """Takes as an argument an optional domain and path and returns a plain
308
- old Python dict of name-value pairs of cookies that meet the
309
- requirements.
310
-
311
- :rtype: dict
312
- """
313
- dictionary = {}
314
- for cookie in iter(self):
315
- if (domain is None or cookie.domain == domain) and (
316
- path is None or cookie.path == path
317
- ):
318
- dictionary[cookie.name] = cookie.value
319
- return dictionary
320
-
321
- def __contains__(self, name):
322
- try:
323
- return super().__contains__(name)
324
- except CookieConflictError:
325
- return True
326
-
327
- def __getitem__(self, name):
328
- """Dict-like __getitem__() for compatibility with client code. Throws
329
- exception if there are more than one cookie with name. In that case,
330
- use the more explicit get() method instead.
331
-
332
- .. warning:: operation is O(n), not O(1).
333
- """
334
- return self._find_no_duplicates(name)
335
-
336
- def __setitem__(self, name, value):
337
- """Dict-like __setitem__ for compatibility with client code. Throws
338
- exception if there is already a cookie of that name in the jar. In that
339
- case, use the more explicit set() method instead.
340
- """
341
- self.set(name, value)
342
-
343
- def __delitem__(self, name):
344
- """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
345
- ``remove_cookie_by_name()``.
346
- """
347
- remove_cookie_by_name(self, name)
348
-
349
- def set_cookie(self, cookie, *args, **kwargs):
350
- if (
351
- hasattr(cookie.value, "startswith")
352
- and cookie.value.startswith('"')
353
- and cookie.value.endswith('"')
354
- ):
355
- cookie.value = cookie.value.replace('\\"', "")
356
- return super().set_cookie(cookie, *args, **kwargs)
357
-
358
- def update(self, other):
359
- """Updates this jar with cookies from another CookieJar or dict-like"""
360
- if isinstance(other, cookielib.CookieJar):
361
- for cookie in other:
362
- self.set_cookie(copy.copy(cookie))
363
- else:
364
- super().update(other)
365
-
366
- def _find(self, name, domain=None, path=None):
367
- """Requests uses this method internally to get cookie values.
368
-
369
- If there are conflicting cookies, _find arbitrarily chooses one.
370
- See _find_no_duplicates if you want an exception thrown if there are
371
- conflicting cookies.
372
-
373
- :param name: a string containing name of cookie
374
- :param domain: (optional) string containing domain of cookie
375
- :param path: (optional) string containing path of cookie
376
- :return: cookie.value
377
- """
378
- for cookie in iter(self):
379
- if cookie.name == name:
380
- if domain is None or cookie.domain == domain:
381
- if path is None or cookie.path == path:
382
- return cookie.value
383
-
384
- raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
385
-
386
- def _find_no_duplicates(self, name, domain=None, path=None):
387
- """Both ``__get_item__`` and ``get`` call this function: it's never
388
- used elsewhere in Requests.
389
-
390
- :param name: a string containing name of cookie
391
- :param domain: (optional) string containing domain of cookie
392
- :param path: (optional) string containing path of cookie
393
- :raises KeyError: if cookie is not found
394
- :raises CookieConflictError: if there are multiple cookies
395
- that match name and optionally domain and path
396
- :return: cookie.value
397
- """
398
- toReturn = None
399
- for cookie in iter(self):
400
- if cookie.name == name:
401
- if domain is None or cookie.domain == domain:
402
- if path is None or cookie.path == path:
403
- if toReturn is not None:
404
- # if there are multiple cookies that meet passed in criteria
405
- raise CookieConflictError(
406
- f"There are multiple cookies with name, {name!r}"
407
- )
408
- # we will eventually return this as long as no cookie conflict
409
- toReturn = cookie.value
410
-
411
- if toReturn:
412
- return toReturn
413
- raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
414
-
415
- def __getstate__(self):
416
- """Unlike a normal CookieJar, this class is pickleable."""
417
- state = self.__dict__.copy()
418
- # remove the unpickleable RLock object
419
- state.pop("_cookies_lock")
420
- return state
421
-
422
- def __setstate__(self, state):
423
- """Unlike a normal CookieJar, this class is pickleable."""
424
- self.__dict__.update(state)
425
- if "_cookies_lock" not in self.__dict__:
426
- self._cookies_lock = threading.RLock()
427
-
428
- def copy(self):
429
- """Return a copy of this RequestsCookieJar."""
430
- new_cj = RequestsCookieJar()
431
- new_cj.set_policy(self.get_policy())
432
- new_cj.update(self)
433
- return new_cj
434
-
435
- def get_policy(self):
436
- """Return the CookiePolicy instance used."""
437
- return self._policy
438
-
439
-
440
- def _copy_cookie_jar(jar):
441
- if jar is None:
442
- return None
443
-
444
- if hasattr(jar, "copy"):
445
- # We're dealing with an instance of RequestsCookieJar
446
- return jar.copy()
447
- # We're dealing with a generic CookieJar instance
448
- new_jar = copy.copy(jar)
449
- new_jar.clear()
450
- for cookie in jar:
451
- new_jar.set_cookie(copy.copy(cookie))
452
- return new_jar
453
-
454
-
455
- def create_cookie(name, value, **kwargs):
456
- """Make a cookie from underspecified parameters.
457
-
458
- By default, the pair of `name` and `value` will be set for the domain ''
459
- and sent on every request (this is sometimes called a "supercookie").
460
- """
461
- result = {
462
- "version": 0,
463
- "name": name,
464
- "value": value,
465
- "port": None,
466
- "domain": "",
467
- "path": "/",
468
- "secure": False,
469
- "expires": None,
470
- "discard": True,
471
- "comment": None,
472
- "comment_url": None,
473
- "rest": {"HttpOnly": None},
474
- "rfc2109": False,
475
- }
476
-
477
- badargs = set(kwargs) - set(result)
478
- if badargs:
479
- raise TypeError(
480
- f"create_cookie() got unexpected keyword arguments: {list(badargs)}"
481
- )
482
-
483
- result.update(kwargs)
484
- result["port_specified"] = bool(result["port"])
485
- result["domain_specified"] = bool(result["domain"])
486
- result["domain_initial_dot"] = result["domain"].startswith(".")
487
- result["path_specified"] = bool(result["path"])
488
-
489
- return cookielib.Cookie(**result)
490
-
491
-
492
- def morsel_to_cookie(morsel):
493
- """Convert a Morsel object into a Cookie containing the one k/v pair."""
494
-
495
- expires = None
496
- if morsel["max-age"]:
497
- try:
498
- expires = int(time.time() + int(morsel["max-age"]))
499
- except ValueError:
500
- raise TypeError(f"max-age: {morsel['max-age']} must be integer")
501
- elif morsel["expires"]:
502
- time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
503
- expires = calendar.timegm(time.strptime(morsel["expires"], time_template))
504
- return create_cookie(
505
- comment=morsel["comment"],
506
- comment_url=bool(morsel["comment"]),
507
- discard=False,
508
- domain=morsel["domain"],
509
- expires=expires,
510
- name=morsel.key,
511
- path=morsel["path"],
512
- port=None,
513
- rest={"HttpOnly": morsel["httponly"]},
514
- rfc2109=False,
515
- secure=bool(morsel["secure"]),
516
- value=morsel.value,
517
- version=morsel["version"] or 0,
518
- )
519
-
520
-
521
- def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
522
- """Returns a CookieJar from a key/value dictionary.
523
-
524
- :param cookie_dict: Dict of key/values to insert into CookieJar.
525
- :param cookiejar: (optional) A cookiejar to add the cookies to.
526
- :param overwrite: (optional) If False, will not replace cookies
527
- already in the jar with new ones.
528
- :rtype: CookieJar
529
- """
530
- if cookiejar is None:
531
- cookiejar = RequestsCookieJar()
532
-
533
- if cookie_dict is not None:
534
- names_from_jar = [cookie.name for cookie in cookiejar]
535
- for name in cookie_dict:
536
- if overwrite or (name not in names_from_jar):
537
- cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
538
-
539
- return cookiejar
540
-
541
-
542
- def merge_cookies(cookiejar, cookies):
543
- """Add cookies to cookiejar and returns a merged CookieJar.
544
-
545
- :param cookiejar: CookieJar object to add the cookies to.
546
- :param cookies: Dictionary or CookieJar object to be added.
547
- :rtype: CookieJar
548
- """
549
- if not isinstance(cookiejar, cookielib.CookieJar):
550
- raise ValueError("You can only merge into CookieJar")
551
-
552
- if isinstance(cookies, dict):
553
- cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)
554
- elif isinstance(cookies, cookielib.CookieJar):
555
- try:
556
- cookiejar.update(cookies)
557
- except AttributeError:
558
- for cookie_in_jar in cookies:
559
- cookiejar.set_cookie(cookie_in_jar)
560
-
561
- return cookiejar
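A short usage sketch of the jar API defined above, assuming the module is importable as requests.cookies (behavior follows the docstrings: get/set take optional domain/path, and merge_cookies adds dict entries without overwriting):

from requests.cookies import RequestsCookieJar, merge_cookies

jar = RequestsCookieJar()
jar.set("session", "abc", domain="example.com", path="/")
jar.set("session", "xyz", domain="other.org", path="/")

print(jar.get("session", domain="example.com"))  # 'abc'
# jar["session"] would raise CookieConflictError here: two cookies share the name

merge_cookies(jar, {"tracking": "1"})  # dict entries merged with overwrite=False
print(sorted(jar.keys()))              # ['session', 'session', 'tracking']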
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/ans_punct.py DELETED
@@ -1,105 +0,0 @@
1
- # --------------------------------------------------------
2
- # OpenVQA
3
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
4
- # based on VQA Evaluation Code
5
- # --------------------------------------------------------
6
-
7
- import re
8
-
9
- contractions = {
10
- "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve":
11
- "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've",
12
- "couldnt've": "couldn't've", "didnt": "didn't", "doesnt":
13
- "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've":
14
- "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent":
15
- "haven't", "hed": "he'd", "hed've": "he'd've", "he'dve":
16
- "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll",
17
- "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im":
18
- "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've":
19
- "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
20
- "maam": "ma'am", "mightnt": "mightn't", "mightnt've":
21
- "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
22
- "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't",
23
- "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
24
- "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat":
25
- "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve":
26
- "she'd've", "she's": "she's", "shouldve": "should've", "shouldnt":
27
- "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve":
28
- "shouldn't've", "somebody'd": "somebodyd", "somebodyd've":
29
- "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll":
30
- "somebody'll", "somebodys": "somebody's", "someoned": "someone'd",
31
- "someoned've": "someone'd've", "someone'dve": "someone'd've",
32
- "someonell": "someone'll", "someones": "someone's", "somethingd":
33
- "something'd", "somethingd've": "something'd've", "something'dve":
34
- "something'd've", "somethingll": "something'll", "thats":
35
- "that's", "thered": "there'd", "thered've": "there'd've",
36
- "there'dve": "there'd've", "therere": "there're", "theres":
37
- "there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve":
38
- "they'd've", "theyll": "they'll", "theyre": "they're", "theyve":
39
- "they've", "twas": "'twas", "wasnt": "wasn't", "wed've":
40
- "we'd've", "we'dve": "we'd've", "weve": "we've", "werent":
41
- "weren't", "whatll": "what'll", "whatre": "what're", "whats":
42
- "what's", "whatve": "what've", "whens": "when's", "whered":
43
- "where'd", "wheres": "where's", "whereve": "where've", "whod":
44
- "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl":
45
- "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
46
- "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve":
47
- "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
48
- "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll":
49
- "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
50
- "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd":
51
- "you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll":
52
- "you'll", "youre": "you're", "youve": "you've"
53
- }
54
-
55
- manual_map = { 'none': '0',
56
- 'zero': '0',
57
- 'one': '1',
58
- 'two': '2',
59
- 'three': '3',
60
- 'four': '4',
61
- 'five': '5',
62
- 'six': '6',
63
- 'seven': '7',
64
- 'eight': '8',
65
- 'nine': '9',
66
- 'ten': '10'}
67
- articles = ['a', 'an', 'the']
68
- period_strip = re.compile(r"(?<!\d)(\.)(?!\d)")
69
- comma_strip = re.compile(r"(\d)(,)(\d)")
70
- punct = [';', r"/", '[', ']', '"', '{', '}',
71
- '(', ')', '=', '+', '\\', '_', '-',
72
- '>', '<', '@', '`', ',', '?', '!']
73
-
74
- def process_punctuation(inText):
75
- outText = inText
76
- for p in punct:
77
- if (p + ' ' in inText or ' ' + p in inText) \
78
- or (re.search(comma_strip, inText) != None):
79
- outText = outText.replace(p, '')
80
- else:
81
- outText = outText.replace(p, ' ')
82
- outText = period_strip.sub("", outText)
83
- return outText
84
-
85
-
86
- def process_digit_article(inText):
87
- outText = []
88
- tempText = inText.lower().split()
89
- for word in tempText:
90
- word = manual_map.setdefault(word, word)
91
- if word not in articles:
92
- outText.append(word)
93
- else:
94
- pass
95
- for wordId, word in enumerate(outText):
96
- if word in contractions:
97
- outText[wordId] = contractions[word]
98
- outText = ' '.join(outText)
99
- return outText
100
-
101
-
102
- def prep_ans(answer):
103
- answer = process_digit_article(process_punctuation(answer))
104
- answer = answer.replace(',', '')
105
- return answer
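A worked example of the normalization chain above (process_punctuation, then process_digit_article, then comma removal), assuming the file is importable as ans_punct:

from ans_punct import prep_ans  # hypothetical import path for the module above

print(prep_ans("Isn't it Two?"))  # -> "isn't it 2"  ('?' stripped, 'two' -> '2')
print(prep_ans("dont know"))      # -> "don't know"  (contraction restored)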
 
spaces/CVPR/LIVE/thrust/thrust/detail/config/config.h DELETED
@@ -1,39 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file config.h
-  *  \brief Defines platform configuration.
-  */
-
- #pragma once
-
- // NOTE: The order of these #includes matters.
-
- #include <thrust/detail/config/simple_defines.h>
- #include <thrust/detail/config/compiler.h>
- #include <thrust/detail/config/cpp_dialect.h>
- #include <thrust/detail/config/cpp_compatibility.h>
- #include <thrust/detail/config/deprecated.h>
- // host_system.h & device_system.h must be #included as early as possible
- // because other config headers depend on it
- #include <thrust/detail/config/host_system.h>
- #include <thrust/detail/config/device_system.h>
- #include <thrust/detail/config/host_device.h>
- #include <thrust/detail/config/debug.h>
- #include <thrust/detail/config/forceinline.h>
- #include <thrust/detail/config/exec_check_disable.h>
- #include <thrust/detail/config/global_workarounds.h>
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/binary_search.h DELETED
@@ -1,23 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits binary_search
- #include <thrust/system/cpp/detail/binary_search.h>
 
spaces/CVPR/WALT/mmdet/core/evaluation/bbox_overlaps.py DELETED
@@ -1,48 +0,0 @@
- import numpy as np
-
-
- def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
-     """Calculate the ious between each bbox of bboxes1 and bboxes2.
-
-     Args:
-         bboxes1(ndarray): shape (n, 4)
-         bboxes2(ndarray): shape (k, 4)
-         mode(str): iou (intersection over union) or iof (intersection
-             over foreground)
-
-     Returns:
-         ious(ndarray): shape (n, k)
-     """
-
-     assert mode in ['iou', 'iof']
-
-     bboxes1 = bboxes1.astype(np.float32)
-     bboxes2 = bboxes2.astype(np.float32)
-     rows = bboxes1.shape[0]
-     cols = bboxes2.shape[0]
-     ious = np.zeros((rows, cols), dtype=np.float32)
-     if rows * cols == 0:
-         return ious
-     exchange = False
-     if bboxes1.shape[0] > bboxes2.shape[0]:
-         bboxes1, bboxes2 = bboxes2, bboxes1
-         ious = np.zeros((cols, rows), dtype=np.float32)
-         exchange = True
-     area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
-     area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
-     for i in range(bboxes1.shape[0]):
-         x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
-         y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
-         x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
-         y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
-         overlap = np.maximum(x_end - x_start, 0) * np.maximum(
-             y_end - y_start, 0)
-         if mode == 'iou':
-             union = area1[i] + area2 - overlap
-         else:
-             union = area1[i] if not exchange else area2
-         union = np.maximum(union, eps)
-         ious[i, :] = overlap / union
-     if exchange:
-         ious = ious.T
-     return ious
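
For reference, a minimal usage sketch of the deleted helper (boxes are `(x1, y1, x2, y2)` corners; the expected value is hand-computed):

```python
import numpy as np

boxes_a = np.array([[0, 0, 10, 10]], dtype=np.float32)  # one 10x10 box
boxes_b = np.array([[5, 5, 15, 15], [20, 20, 30, 30]], dtype=np.float32)

ious = bbox_overlaps(boxes_a, boxes_b, mode='iou')
# Intersection with the first box is 5 * 5 = 25 and the union is
# 100 + 100 - 25 = 175, so ious ~= [[0.1429, 0.0]] with shape (1, 2).
```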
 
spaces/CVPR/WALT/mmdet/models/losses/smooth_l1_loss.py DELETED
@@ -1,139 +0,0 @@
- import mmcv
- import torch
- import torch.nn as nn
-
- from ..builder import LOSSES
- from .utils import weighted_loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def smooth_l1_loss(pred, target, beta=1.0):
-     """Smooth L1 loss.
-
-     Args:
-         pred (torch.Tensor): The prediction.
-         target (torch.Tensor): The learning target of the prediction.
-         beta (float, optional): The threshold in the piecewise function.
-             Defaults to 1.0.
-
-     Returns:
-         torch.Tensor: Calculated loss
-     """
-     assert beta > 0
-     assert pred.size() == target.size() and target.numel() > 0
-     diff = torch.abs(pred - target)
-     loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
-                        diff - 0.5 * beta)
-     return loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def l1_loss(pred, target):
-     """L1 loss.
-
-     Args:
-         pred (torch.Tensor): The prediction.
-         target (torch.Tensor): The learning target of the prediction.
-
-     Returns:
-         torch.Tensor: Calculated loss
-     """
-     assert pred.size() == target.size() and target.numel() > 0
-     loss = torch.abs(pred - target)
-     return loss
-
-
- @LOSSES.register_module()
- class SmoothL1Loss(nn.Module):
-     """Smooth L1 loss.
-
-     Args:
-         beta (float, optional): The threshold in the piecewise function.
-             Defaults to 1.0.
-         reduction (str, optional): The method to reduce the loss.
-             Options are "none", "mean" and "sum". Defaults to "mean".
-         loss_weight (float, optional): The weight of loss.
-     """
-
-     def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
-         super(SmoothL1Loss, self).__init__()
-         self.beta = beta
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-
-     def forward(self,
-                 pred,
-                 target,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None,
-                 **kwargs):
-         """Forward function.
-
-         Args:
-             pred (torch.Tensor): The prediction.
-             target (torch.Tensor): The learning target of the prediction.
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-         loss_bbox = self.loss_weight * smooth_l1_loss(
-             pred,
-             target,
-             weight,
-             beta=self.beta,
-             reduction=reduction,
-             avg_factor=avg_factor,
-             **kwargs)
-         return loss_bbox
-
-
- @LOSSES.register_module()
- class L1Loss(nn.Module):
-     """L1 loss.
-
-     Args:
-         reduction (str, optional): The method to reduce the loss.
-             Options are "none", "mean" and "sum".
-         loss_weight (float, optional): The weight of loss.
-     """
-
-     def __init__(self, reduction='mean', loss_weight=1.0):
-         super(L1Loss, self).__init__()
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-
-     def forward(self,
-                 pred,
-                 target,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None):
-         """Forward function.
-
-         Args:
-             pred (torch.Tensor): The prediction.
-             target (torch.Tensor): The learning target of the prediction.
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-         loss_bbox = self.loss_weight * l1_loss(
-             pred, target, weight, reduction=reduction, avg_factor=avg_factor)
-         return loss_bbox
 
spaces/CVPR/lama-example/saicinpainting/training/modules/__init__.py DELETED
@@ -1,31 +0,0 @@
- import logging
-
- from saicinpainting.training.modules.ffc import FFCResNetGenerator
- from saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \
-     NLayerDiscriminator, MultidilatedNLayerDiscriminator
-
- def make_generator(config, kind, **kwargs):
-     logging.info(f'Make generator {kind}')
-
-     if kind == 'pix2pixhd_multidilated':
-         return MultiDilatedGlobalGenerator(**kwargs)
-
-     if kind == 'pix2pixhd_global':
-         return GlobalGenerator(**kwargs)
-
-     if kind == 'ffc_resnet':
-         return FFCResNetGenerator(**kwargs)
-
-     raise ValueError(f'Unknown generator kind {kind}')
-
-
- def make_discriminator(kind, **kwargs):
-     logging.info(f'Make discriminator {kind}')
-
-     if kind == 'pix2pixhd_nlayer_multidilated':
-         return MultidilatedNLayerDiscriminator(**kwargs)
-
-     if kind == 'pix2pixhd_nlayer':
-         return NLayerDiscriminator(**kwargs)
-
-     raise ValueError(f'Unknown discriminator kind {kind}')
 
spaces/CVPR/regionclip-demo/detectron2/evaluation/cityscapes_evaluation.py DELETED
@@ -1,194 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import glob
- import logging
- import numpy as np
- import os
- import tempfile
- from collections import OrderedDict
- import torch
- from PIL import Image
-
- from detectron2.data import MetadataCatalog
- from detectron2.utils import comm
- from detectron2.utils.file_io import PathManager
-
- from .evaluator import DatasetEvaluator
-
-
- class CityscapesEvaluator(DatasetEvaluator):
-     """
-     Base class for evaluation using cityscapes API.
-     """
-
-     def __init__(self, dataset_name):
-         """
-         Args:
-             dataset_name (str): the name of the dataset.
-                 It must have the following metadata associated with it:
-                 "thing_classes", "gt_dir".
-         """
-         self._metadata = MetadataCatalog.get(dataset_name)
-         self._cpu_device = torch.device("cpu")
-         self._logger = logging.getLogger(__name__)
-
-     def reset(self):
-         self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
-         self._temp_dir = self._working_dir.name
-         # All workers will write to the same results directory
-         # TODO this does not work in distributed training
-         self._temp_dir = comm.all_gather(self._temp_dir)[0]
-         if self._temp_dir != self._working_dir.name:
-             self._working_dir.cleanup()
-         self._logger.info(
-             "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
-         )
-
-
- class CityscapesInstanceEvaluator(CityscapesEvaluator):
-     """
-     Evaluate instance segmentation results on cityscapes dataset using cityscapes API.
-
-     Note:
-         * It does not work in multi-machine distributed training.
-         * It contains a synchronization, therefore has to be used on all ranks.
-         * Only the main process runs evaluation.
-     """
-
-     def process(self, inputs, outputs):
-         from cityscapesscripts.helpers.labels import name2label
-
-         for input, output in zip(inputs, outputs):
-             file_name = input["file_name"]
-             basename = os.path.splitext(os.path.basename(file_name))[0]
-             pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
-
-             if "instances" in output:
-                 output = output["instances"].to(self._cpu_device)
-                 num_instances = len(output)
-                 with open(pred_txt, "w") as fout:
-                     for i in range(num_instances):
-                         pred_class = output.pred_classes[i]
-                         classes = self._metadata.thing_classes[pred_class]
-                         class_id = name2label[classes].id
-                         score = output.scores[i]
-                         mask = output.pred_masks[i].numpy().astype("uint8")
-                         png_filename = os.path.join(
-                             self._temp_dir, basename + "_{}_{}.png".format(i, classes)
-                         )
-
-                         Image.fromarray(mask * 255).save(png_filename)
-                         fout.write(
-                             "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
-                         )
-             else:
-                 # Cityscapes requires a prediction file for every ground truth image.
-                 with open(pred_txt, "w") as fout:
-                     pass
-
-     def evaluate(self):
-         """
-         Returns:
-             dict: has a key "segm", whose value is a dict of "AP" and "AP50".
-         """
-         comm.synchronize()
-         if comm.get_rank() > 0:
-             return
-         import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
-
-         self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
-
-         # set some global states in cityscapes evaluation API, before evaluating
-         cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
-         cityscapes_eval.args.predictionWalk = None
-         cityscapes_eval.args.JSONOutput = False
-         cityscapes_eval.args.colorized = False
-         cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
-
-         # These lines are adopted from
-         # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
-         gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
-         groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
-         assert len(
-             groundTruthImgList
-         ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
-             cityscapes_eval.args.groundTruthSearch
-         )
-         predictionImgList = []
-         for gt in groundTruthImgList:
-             predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
-         results = cityscapes_eval.evaluateImgLists(
-             predictionImgList, groundTruthImgList, cityscapes_eval.args
-         )["averages"]
-
-         ret = OrderedDict()
-         ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
-         self._working_dir.cleanup()
-         return ret
-
-
- class CityscapesSemSegEvaluator(CityscapesEvaluator):
-     """
-     Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.
-
-     Note:
-         * It does not work in multi-machine distributed training.
-         * It contains a synchronization, therefore has to be used on all ranks.
-         * Only the main process runs evaluation.
-     """
-
-     def process(self, inputs, outputs):
-         from cityscapesscripts.helpers.labels import trainId2label
-
-         for input, output in zip(inputs, outputs):
-             file_name = input["file_name"]
-             basename = os.path.splitext(os.path.basename(file_name))[0]
-             pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
-
-             output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
-             pred = 255 * np.ones(output.shape, dtype=np.uint8)
-             for train_id, label in trainId2label.items():
-                 if label.ignoreInEval:
-                     continue
-                 pred[output == train_id] = label.id
-             Image.fromarray(pred).save(pred_filename)
-
-     def evaluate(self):
-         comm.synchronize()
-         if comm.get_rank() > 0:
-             return
-         # Load the Cityscapes eval script *after* setting the required env var,
-         # since the script reads CITYSCAPES_DATASET into global variables at load time.
-         import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
-
-         self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
-
-         # set some global states in cityscapes evaluation API, before evaluating
-         cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
-         cityscapes_eval.args.predictionWalk = None
-         cityscapes_eval.args.JSONOutput = False
-         cityscapes_eval.args.colorized = False
-
-         # These lines are adopted from
-         # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
-         gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
-         groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
-         assert len(
-             groundTruthImgList
-         ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
-             cityscapes_eval.args.groundTruthSearch
-         )
-         predictionImgList = []
-         for gt in groundTruthImgList:
-             predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
-         results = cityscapes_eval.evaluateImgLists(
-             predictionImgList, groundTruthImgList, cityscapes_eval.args
-         )
-         ret = OrderedDict()
-         ret["sem_seg"] = {
-             "IoU": 100.0 * results["averageScoreClasses"],
-             "iIoU": 100.0 * results["averageScoreInstClasses"],
-             "IoU_sup": 100.0 * results["averageScoreCategories"],
-             "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
-         }
-         self._working_dir.cleanup()
-         return ret
 
spaces/CognitiveLabs/Research-Assistant/README.md DELETED
@@ -1,78 +0,0 @@
- ---
- title: AI-Research-Assistant
- app_file: app.py
- sdk: gradio
- sdk_version: 3.38.0
- duplicated_from: zej97/AI-Research-Assistant
- ---
- <div style="width: 100%;">
-     <img src="./statics/title.svg" style="width: 100%;">
-     <div align="right">
-         <a href="./README.md">English</a> |
-         <a href="./statics/README_zh.md">中文</a>
-     </div>
- </div>
-
- Inspired by [gpt-researcher](https://github.com/assafelovic/gpt-researcher), this project endeavors to develop an AI research assistant that **generates research reports** effortlessly for researchers. For instance, a researcher can ask the assistant to compose a report on *the latest advancements in the field of superconductors as of 2023*, currently a trending topic, and the assistant will compile one from the relevant information it gathers on the internet. AIRA now also supports **academic English polishing**.
-
- <!-- make a table -->
- | Example1-1 | Example1-2 | Example1-3 |
- | :----------------------------------: | :----------------------------------: | :----------------------------------: |
- | <img src="./statics/example1-1.png"> | <img src="./statics/example1-2.png"> | <img src="./statics/example1-3.png"> |
-
- The currently supported agents cover a wide range of fields, including *finance, business analysis, clinical medicine, basic medicine, travel, academic research and sociology*.
-
- In addition to the official API, this project can generate research reports through a third-party API. For access, see [chimeragpt](https://chimeragpt.adventblocks.cc/) or [GPT-API-free](https://github.com/chatanywhere/GPT_API_free). Before running the project, make sure the environment variables `OPENAI_API_KEY` and `OPENAI_API_BASE` are set:
-
- ```shell
- $ export OPENAI_API_KEY=your_api_key
- $ export OPENAI_API_BASE=your_api_base
- ```
-
- or set the API key and base in the `.env` file.
-
-
- ## Installation
-
- 1. Clone the repository
-
-     ```shell
-     $ git clone git@github.com:paradoxtown/ai_research_assistant.git
-     $ cd ai_research_assistant
-     ```
-
- 2. Install the dependencies
-
-     ```shell
-     $ pip install -r requirements.txt
-     ```
-
- 3. Export environment variables
-
-     ```shell
-     $ export OPENAI_API_KEY=your_api_key
-     $ export OPENAI_API_BASE=your_api_base
-     ```
-     or modify the `.env` file.
-
- 4. Run the project
-
-     ```shell
-     $ python app.py
-     ```
-
- ## TODO
-
- - [x] Switch Google Search to DuckDuckGo
- - [ ] Literature review
- - [x] Third-party API
- - [ ] Prettify report
- - [x] Add medical agent and social agent
- - [ ] Add an option for users to customize the number of words and the temperature
- - [ ] Copy and download buttons
- - [ ] Allow the user to choose the degree of research
- - [ ] Wikipedia understanding
-
- ---
-
- <div align="center">Happy researching! 🚀</div>
 
spaces/Cropinky/hana_hanak_houses/networks_fastgan.py DELETED
@@ -1,179 +0,0 @@
- # original implementation: https://github.com/odegeasslbc/FastGAN-pytorch/blob/main/models.py
- #
- # modified by Axel Sauer for "Projected GANs Converge Faster"
- #
- import torch.nn as nn
- from blocks import (InitLayer, UpBlockBig, UpBlockBigCond, UpBlockSmall, UpBlockSmallCond, SEBlock, conv2d)
- from huggingface_hub import PyTorchModelHubMixin
-
- def normalize_second_moment(x, dim=1, eps=1e-8):
-     return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
-
-
- class DummyMapping(nn.Module):
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, z, c, **kwargs):
-         return z.unsqueeze(1)  # to fit the StyleGAN API
-
-
- class FastganSynthesis(nn.Module):
-     def __init__(self, ngf=128, z_dim=256, nc=3, img_resolution=256, lite=False):
-         super().__init__()
-         self.img_resolution = img_resolution
-         self.z_dim = z_dim
-
-         # channel multiplier
-         nfc_multi = {2: 16, 4: 16, 8: 8, 16: 4, 32: 2, 64: 2, 128: 1, 256: 0.5,
-                      512: 0.25, 1024: 0.125}
-         nfc = {}
-         for k, v in nfc_multi.items():
-             nfc[k] = int(v * ngf)
-
-         # layers
-         self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
-
-         UpBlock = UpBlockSmall if lite else UpBlockBig
-
-         self.feat_8 = UpBlock(nfc[4], nfc[8])
-         self.feat_16 = UpBlock(nfc[8], nfc[16])
-         self.feat_32 = UpBlock(nfc[16], nfc[32])
-         self.feat_64 = UpBlock(nfc[32], nfc[64])
-         self.feat_128 = UpBlock(nfc[64], nfc[128])
-         self.feat_256 = UpBlock(nfc[128], nfc[256])
-
-         self.se_64 = SEBlock(nfc[4], nfc[64])
-         self.se_128 = SEBlock(nfc[8], nfc[128])
-         self.se_256 = SEBlock(nfc[16], nfc[256])
-
-         self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
-
-         if img_resolution > 256:
-             self.feat_512 = UpBlock(nfc[256], nfc[512])
-             self.se_512 = SEBlock(nfc[32], nfc[512])
-         if img_resolution > 512:
-             self.feat_1024 = UpBlock(nfc[512], nfc[1024])
-
-     def forward(self, input, c, **kwargs):
-         # map noise to hypersphere as in "Progressive Growing of GANS"
-         input = normalize_second_moment(input[:, 0])
-
-         feat_4 = self.init(input)
-         feat_8 = self.feat_8(feat_4)
-         feat_16 = self.feat_16(feat_8)
-         feat_32 = self.feat_32(feat_16)
-         feat_64 = self.se_64(feat_4, self.feat_64(feat_32))
-         feat_128 = self.se_128(feat_8, self.feat_128(feat_64))
-
-         if self.img_resolution >= 128:
-             feat_last = feat_128
-
-         if self.img_resolution >= 256:
-             feat_last = self.se_256(feat_16, self.feat_256(feat_last))
-
-         if self.img_resolution >= 512:
-             feat_last = self.se_512(feat_32, self.feat_512(feat_last))
-
-         if self.img_resolution >= 1024:
-             feat_last = self.feat_1024(feat_last)
-
-         return self.to_big(feat_last)
-
-
- class FastganSynthesisCond(nn.Module):
-     def __init__(self, ngf=64, z_dim=256, nc=3, img_resolution=256, num_classes=1000, lite=False):
-         super().__init__()
-
-         self.z_dim = z_dim
-         nfc_multi = {2: 16, 4: 16, 8: 8, 16: 4, 32: 2, 64: 2, 128: 1, 256: 0.5,
-                      512: 0.25, 1024: 0.125, 2048: 0.125}
-         nfc = {}
-         for k, v in nfc_multi.items():
-             nfc[k] = int(v * ngf)
-
-         self.img_resolution = img_resolution
-
-         self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
-
-         UpBlock = UpBlockSmallCond if lite else UpBlockBigCond
-
-         self.feat_8 = UpBlock(nfc[4], nfc[8], z_dim)
-         self.feat_16 = UpBlock(nfc[8], nfc[16], z_dim)
-         self.feat_32 = UpBlock(nfc[16], nfc[32], z_dim)
-         self.feat_64 = UpBlock(nfc[32], nfc[64], z_dim)
-         self.feat_128 = UpBlock(nfc[64], nfc[128], z_dim)
-         self.feat_256 = UpBlock(nfc[128], nfc[256], z_dim)
-
-         self.se_64 = SEBlock(nfc[4], nfc[64])
-         self.se_128 = SEBlock(nfc[8], nfc[128])
-         self.se_256 = SEBlock(nfc[16], nfc[256])
-
-         self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
-
-         if img_resolution > 256:
-             self.feat_512 = UpBlock(nfc[256], nfc[512])
-             self.se_512 = SEBlock(nfc[32], nfc[512])
-         if img_resolution > 512:
-             self.feat_1024 = UpBlock(nfc[512], nfc[1024])
-
-         self.embed = nn.Embedding(num_classes, z_dim)
-
-     def forward(self, input, c, update_emas=False):
-         c = self.embed(c.argmax(1))
-
-         # map noise to hypersphere as in "Progressive Growing of GANS"
-         input = normalize_second_moment(input[:, 0])
-
-         feat_4 = self.init(input)
-         feat_8 = self.feat_8(feat_4, c)
-         feat_16 = self.feat_16(feat_8, c)
-         feat_32 = self.feat_32(feat_16, c)
-         feat_64 = self.se_64(feat_4, self.feat_64(feat_32, c))
-         feat_128 = self.se_128(feat_8, self.feat_128(feat_64, c))
-
-         if self.img_resolution >= 128:
-             feat_last = feat_128
-
-         if self.img_resolution >= 256:
-             feat_last = self.se_256(feat_16, self.feat_256(feat_last, c))
-
-         if self.img_resolution >= 512:
-             feat_last = self.se_512(feat_32, self.feat_512(feat_last, c))
-
-         if self.img_resolution >= 1024:
-             feat_last = self.feat_1024(feat_last, c)
-
-         return self.to_big(feat_last)
-
-
- class MyGenerator(nn.Module, PyTorchModelHubMixin):
-     def __init__(
-         self,
-         z_dim=256,
-         c_dim=0,
-         w_dim=0,
-         img_resolution=256,
-         img_channels=3,
-         ngf=128,
-         cond=0,
-         mapping_kwargs={},
-         synthesis_kwargs={}
-     ):
-         super().__init__()
-         # self.config = kwargs.pop("config", None)
-         self.z_dim = z_dim
-         self.c_dim = c_dim
-         self.w_dim = w_dim
-         self.img_resolution = img_resolution
-         self.img_channels = img_channels
-
-         # Mapping and Synthesis Networks
-         self.mapping = DummyMapping()  # to fit the StyleGAN API
-         Synthesis = FastganSynthesisCond if cond else FastganSynthesis
-         self.synthesis = Synthesis(ngf=ngf, z_dim=z_dim, nc=img_channels, img_resolution=img_resolution, **synthesis_kwargs)
-
-     def forward(self, z, c, **kwargs):
-         w = self.mapping(z, c)
-         img = self.synthesis(w, c)
-         return img
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/McIdasImagePlugin.py DELETED
@@ -1,75 +0,0 @@
- #
- # The Python Imaging Library.
- # $Id$
- #
- # Basic McIdas support for PIL
- #
- # History:
- # 1997-05-05 fl  Created (8-bit images only)
- # 2009-03-08 fl  Added 16/32-bit support.
- #
- # Thanks to Richard Jones and Craig Swank for specs and samples.
- #
- # Copyright (c) Secret Labs AB 1997.
- # Copyright (c) Fredrik Lundh 1997.
- #
- # See the README file for information on usage and redistribution.
- #
-
- import struct
-
- from . import Image, ImageFile
-
-
- def _accept(s):
-     return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
-
-
- ##
- # Image plugin for McIdas area images.
-
-
- class McIdasImageFile(ImageFile.ImageFile):
-     format = "MCIDAS"
-     format_description = "McIdas area file"
-
-     def _open(self):
-         # parse area file directory
-         s = self.fp.read(256)
-         if not _accept(s) or len(s) != 256:
-             msg = "not an McIdas area file"
-             raise SyntaxError(msg)
-
-         self.area_descriptor_raw = s
-         self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
-
-         # get mode
-         if w[11] == 1:
-             mode = rawmode = "L"
-         elif w[11] == 2:
-             # FIXME: add memory map support
-             mode = "I"
-             rawmode = "I;16B"
-         elif w[11] == 4:
-             # FIXME: add memory map support
-             mode = "I"
-             rawmode = "I;32B"
-         else:
-             msg = "unsupported McIdas format"
-             raise SyntaxError(msg)
-
-         self.mode = mode
-         self._size = w[10], w[9]
-
-         offset = w[34] + w[15]
-         stride = w[15] + w[10] * w[11] * w[14]
-
-         self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
-
-
- # --------------------------------------------------------------------
- # registry
-
- Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
-
- # no default extension
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_termui_impl.py DELETED
@@ -1,739 +0,0 @@
- """
- This module contains implementations for the termui module. To keep the
- import time of Click down, some infrequently used functionality is
- placed in this module and only imported as needed.
- """
- import contextlib
- import math
- import os
- import sys
- import time
- import typing as t
- from gettext import gettext as _
- from io import StringIO
- from types import TracebackType
-
- from ._compat import _default_text_stdout
- from ._compat import CYGWIN
- from ._compat import get_best_encoding
- from ._compat import isatty
- from ._compat import open_stream
- from ._compat import strip_ansi
- from ._compat import term_len
- from ._compat import WIN
- from .exceptions import ClickException
- from .utils import echo
-
- V = t.TypeVar("V")
-
- if os.name == "nt":
-     BEFORE_BAR = "\r"
-     AFTER_BAR = "\n"
- else:
-     BEFORE_BAR = "\r\033[?25l"
-     AFTER_BAR = "\033[?25h\n"
-
-
- class ProgressBar(t.Generic[V]):
-     def __init__(
-         self,
-         iterable: t.Optional[t.Iterable[V]],
-         length: t.Optional[int] = None,
-         fill_char: str = "#",
-         empty_char: str = " ",
-         bar_template: str = "%(bar)s",
-         info_sep: str = "  ",
-         show_eta: bool = True,
-         show_percent: t.Optional[bool] = None,
-         show_pos: bool = False,
-         item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
-         label: t.Optional[str] = None,
-         file: t.Optional[t.TextIO] = None,
-         color: t.Optional[bool] = None,
-         update_min_steps: int = 1,
-         width: int = 30,
-     ) -> None:
-         self.fill_char = fill_char
-         self.empty_char = empty_char
-         self.bar_template = bar_template
-         self.info_sep = info_sep
-         self.show_eta = show_eta
-         self.show_percent = show_percent
-         self.show_pos = show_pos
-         self.item_show_func = item_show_func
-         self.label: str = label or ""
-
-         if file is None:
-             file = _default_text_stdout()
-
-             # There are no standard streams attached to write to. For example,
-             # pythonw on Windows.
-             if file is None:
-                 file = StringIO()
-
-         self.file = file
-         self.color = color
-         self.update_min_steps = update_min_steps
-         self._completed_intervals = 0
-         self.width: int = width
-         self.autowidth: bool = width == 0
-
-         if length is None:
-             from operator import length_hint
-
-             length = length_hint(iterable, -1)
-
-             if length == -1:
-                 length = None
-         if iterable is None:
-             if length is None:
-                 raise TypeError("iterable or length is required")
-             iterable = t.cast(t.Iterable[V], range(length))
-         self.iter: t.Iterable[V] = iter(iterable)
-         self.length = length
-         self.pos = 0
-         self.avg: t.List[float] = []
-         self.last_eta: float
-         self.start: float
-         self.start = self.last_eta = time.time()
-         self.eta_known: bool = False
-         self.finished: bool = False
-         self.max_width: t.Optional[int] = None
-         self.entered: bool = False
-         self.current_item: t.Optional[V] = None
-         self.is_hidden: bool = not isatty(self.file)
-         self._last_line: t.Optional[str] = None
-
-     def __enter__(self) -> "ProgressBar[V]":
-         self.entered = True
-         self.render_progress()
-         return self
-
-     def __exit__(
-         self,
-         exc_type: t.Optional[t.Type[BaseException]],
-         exc_value: t.Optional[BaseException],
-         tb: t.Optional[TracebackType],
-     ) -> None:
-         self.render_finish()
-
-     def __iter__(self) -> t.Iterator[V]:
-         if not self.entered:
-             raise RuntimeError("You need to use progress bars in a with block.")
-         self.render_progress()
-         return self.generator()
-
-     def __next__(self) -> V:
-         # Iteration is defined in terms of a generator function,
-         # returned by iter(self); use that to define next(). This works
-         # because `self.iter` is an iterable consumed by that generator,
-         # so it is re-entry safe. Calling `next(self.generator())`
-         # twice works and does "what you want".
-         return next(iter(self))
-
-     def render_finish(self) -> None:
-         if self.is_hidden:
-             return
-         self.file.write(AFTER_BAR)
-         self.file.flush()
-
-     @property
-     def pct(self) -> float:
-         if self.finished:
-             return 1.0
-         return min(self.pos / (float(self.length or 1) or 1), 1.0)
-
-     @property
-     def time_per_iteration(self) -> float:
-         if not self.avg:
-             return 0.0
-         return sum(self.avg) / float(len(self.avg))
-
-     @property
-     def eta(self) -> float:
-         if self.length is not None and not self.finished:
-             return self.time_per_iteration * (self.length - self.pos)
-         return 0.0
-
-     def format_eta(self) -> str:
-         if self.eta_known:
-             t = int(self.eta)
-             seconds = t % 60
-             t //= 60
-             minutes = t % 60
-             t //= 60
-             hours = t % 24
-             t //= 24
-             if t > 0:
-                 return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
-             else:
-                 return f"{hours:02}:{minutes:02}:{seconds:02}"
-         return ""
-
-     def format_pos(self) -> str:
-         pos = str(self.pos)
-         if self.length is not None:
-             pos += f"/{self.length}"
-         return pos
-
-     def format_pct(self) -> str:
-         return f"{int(self.pct * 100): 4}%"[1:]
-
-     def format_bar(self) -> str:
-         if self.length is not None:
-             bar_length = int(self.pct * self.width)
-             bar = self.fill_char * bar_length
-             bar += self.empty_char * (self.width - bar_length)
-         elif self.finished:
-             bar = self.fill_char * self.width
-         else:
-             chars = list(self.empty_char * (self.width or 1))
-             if self.time_per_iteration != 0:
-                 chars[
-                     int(
-                         (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
-                         * self.width
-                     )
-                 ] = self.fill_char
-             bar = "".join(chars)
-         return bar
-
-     def format_progress_line(self) -> str:
-         show_percent = self.show_percent
-
-         info_bits = []
-         if self.length is not None and show_percent is None:
-             show_percent = not self.show_pos
-
-         if self.show_pos:
-             info_bits.append(self.format_pos())
-         if show_percent:
-             info_bits.append(self.format_pct())
-         if self.show_eta and self.eta_known and not self.finished:
-             info_bits.append(self.format_eta())
-         if self.item_show_func is not None:
-             item_info = self.item_show_func(self.current_item)
-             if item_info is not None:
-                 info_bits.append(item_info)
-
-         return (
-             self.bar_template
-             % {
-                 "label": self.label,
-                 "bar": self.format_bar(),
-                 "info": self.info_sep.join(info_bits),
-             }
-         ).rstrip()
-
-     def render_progress(self) -> None:
-         import shutil
-
-         if self.is_hidden:
-             # Only output the label as it changes if the output is not a
-             # TTY. Use file=stderr if you expect to be piping stdout.
-             if self._last_line != self.label:
-                 self._last_line = self.label
-                 echo(self.label, file=self.file, color=self.color)
-
-             return
-
-         buf = []
-         # Update width in case the terminal has been resized
-         if self.autowidth:
-             old_width = self.width
-             self.width = 0
-             clutter_length = term_len(self.format_progress_line())
-             new_width = max(0, shutil.get_terminal_size().columns - clutter_length)
-             if new_width < old_width:
-                 buf.append(BEFORE_BAR)
-                 buf.append(" " * self.max_width)  # type: ignore
-                 self.max_width = new_width
-             self.width = new_width
-
-         clear_width = self.width
-         if self.max_width is not None:
-             clear_width = self.max_width
-
-         buf.append(BEFORE_BAR)
-         line = self.format_progress_line()
-         line_len = term_len(line)
-         if self.max_width is None or self.max_width < line_len:
-             self.max_width = line_len
-
-         buf.append(line)
-         buf.append(" " * (clear_width - line_len))
-         line = "".join(buf)
-         # Render the line only if it changed.
-
-         if line != self._last_line:
-             self._last_line = line
-             echo(line, file=self.file, color=self.color, nl=False)
-             self.file.flush()
-
-     def make_step(self, n_steps: int) -> None:
-         self.pos += n_steps
-         if self.length is not None and self.pos >= self.length:
-             self.finished = True
-
-         if (time.time() - self.last_eta) < 1.0:
-             return
-
-         self.last_eta = time.time()
-
-         # self.avg is a rolling list of length <= 7 of steps where steps are
-         # defined as time elapsed divided by the total progress through
-         # self.length.
-         if self.pos:
-             step = (time.time() - self.start) / self.pos
-         else:
-             step = time.time() - self.start
-
-         self.avg = self.avg[-6:] + [step]
-
-         self.eta_known = self.length is not None
-
-     def update(self, n_steps: int, current_item: t.Optional[V] = None) -> None:
-         """Update the progress bar by advancing a specified number of
-         steps, and optionally set the ``current_item`` for this new
-         position.
-
-         :param n_steps: Number of steps to advance.
-         :param current_item: Optional item to set as ``current_item``
-             for the updated position.
-
-         .. versionchanged:: 8.0
-             Added the ``current_item`` optional parameter.
-
-         .. versionchanged:: 8.0
-             Only render when the number of steps meets the
-             ``update_min_steps`` threshold.
-         """
-         if current_item is not None:
-             self.current_item = current_item
-
-         self._completed_intervals += n_steps
-
-         if self._completed_intervals >= self.update_min_steps:
-             self.make_step(self._completed_intervals)
-             self.render_progress()
-             self._completed_intervals = 0
-
-     def finish(self) -> None:
-         self.eta_known = False
-         self.current_item = None
-         self.finished = True
-
-     def generator(self) -> t.Iterator[V]:
-         """Return a generator which yields the items added to the bar
-         during construction, and updates the progress bar *after* the
-         yielded block returns.
-         """
-         # WARNING: the iterator interface for `ProgressBar` relies on
-         # this and only works because this is a simple generator which
-         # doesn't create or manage additional state. If this function
-         # changes, the impact should be evaluated both against
-         # `iter(bar)` and `next(bar)`. `next()` in particular may call
-         # `self.generator()` repeatedly, and this must remain safe in
-         # order for that interface to work.
-         if not self.entered:
-             raise RuntimeError("You need to use progress bars in a with block.")
-
-         if self.is_hidden:
-             yield from self.iter
-         else:
-             for rv in self.iter:
-                 self.current_item = rv
-
-                 # This allows show_item_func to be updated before the
-                 # item is processed. Only trigger at the beginning of
-                 # the update interval.
-                 if self._completed_intervals == 0:
-                     self.render_progress()
-
-                 yield rv
-                 self.update(1)
-
-             self.finish()
-             self.render_progress()
-
-
- def pager(generator: t.Iterable[str], color: t.Optional[bool] = None) -> None:
-     """Decide what method to use for paging through text."""
-     stdout = _default_text_stdout()
-
-     # There are no standard streams attached to write to. For example,
-     # pythonw on Windows.
-     if stdout is None:
-         stdout = StringIO()
-
-     if not isatty(sys.stdin) or not isatty(stdout):
-         return _nullpager(stdout, generator, color)
-     pager_cmd = (os.environ.get("PAGER", None) or "").strip()
-     if pager_cmd:
-         if WIN:
-             return _tempfilepager(generator, pager_cmd, color)
-         return _pipepager(generator, pager_cmd, color)
-     if os.environ.get("TERM") in ("dumb", "emacs"):
-         return _nullpager(stdout, generator, color)
-     if WIN or sys.platform.startswith("os2"):
-         return _tempfilepager(generator, "more <", color)
-     if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
-         return _pipepager(generator, "less", color)
-
-     import tempfile
-
-     fd, filename = tempfile.mkstemp()
-     os.close(fd)
-     try:
-         if hasattr(os, "system") and os.system(f'more "{filename}"') == 0:
-             return _pipepager(generator, "more", color)
-         return _nullpager(stdout, generator, color)
-     finally:
-         os.unlink(filename)
-
-
- def _pipepager(generator: t.Iterable[str], cmd: str, color: t.Optional[bool]) -> None:
-     """Page through text by feeding it to another program. Invoking a
-     pager through this might support colors.
-     """
-     import subprocess
-
-     env = dict(os.environ)
-
-     # If we're piping to less we might support colors under the
-     # condition that
-     cmd_detail = cmd.rsplit("/", 1)[-1].split()
-     if color is None and cmd_detail[0] == "less":
-         less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}"
-         if not less_flags:
-             env["LESS"] = "-R"
-             color = True
-         elif "r" in less_flags or "R" in less_flags:
-             color = True
-
-     c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
-     stdin = t.cast(t.BinaryIO, c.stdin)
-     encoding = get_best_encoding(stdin)
-     try:
-         for text in generator:
-             if not color:
-                 text = strip_ansi(text)
-
-             stdin.write(text.encode(encoding, "replace"))
-     except (OSError, KeyboardInterrupt):
-         pass
-     else:
-         stdin.close()
-
-     # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
-     # search or other commands inside less).
-     #
-     # That means when the user hits ^C, the parent process (click) terminates,
-     # but less is still alive, paging the output and messing up the terminal.
-     #
-     # If the user wants to make the pager exit on ^C, they should set
-     # `LESS='-K'`. It's not our decision to make.
-     while True:
-         try:
-             c.wait()
-         except KeyboardInterrupt:
-             pass
-         else:
-             break
-
-
- def _tempfilepager(
-     generator: t.Iterable[str], cmd: str, color: t.Optional[bool]
- ) -> None:
-     """Page through text by invoking a program on a temporary file."""
-     import tempfile
-
-     fd, filename = tempfile.mkstemp()
-     # TODO: This never terminates if the passed generator never terminates.
-     text = "".join(generator)
-     if not color:
-         text = strip_ansi(text)
-     encoding = get_best_encoding(sys.stdout)
-     with open_stream(filename, "wb")[0] as f:
-         f.write(text.encode(encoding))
-     try:
-         os.system(f'{cmd} "{filename}"')
-     finally:
-         os.close(fd)
-         os.unlink(filename)
-
-
- def _nullpager(
-     stream: t.TextIO, generator: t.Iterable[str], color: t.Optional[bool]
- ) -> None:
-     """Simply print unformatted text. This is the ultimate fallback."""
-     for text in generator:
-         if not color:
-             text = strip_ansi(text)
-         stream.write(text)
-
-
- class Editor:
-     def __init__(
-         self,
-         editor: t.Optional[str] = None,
-         env: t.Optional[t.Mapping[str, str]] = None,
-         require_save: bool = True,
-         extension: str = ".txt",
-     ) -> None:
-         self.editor = editor
-         self.env = env
-         self.require_save = require_save
-         self.extension = extension
-
-     def get_editor(self) -> str:
-         if self.editor is not None:
-             return self.editor
-         for key in "VISUAL", "EDITOR":
-             rv = os.environ.get(key)
-             if rv:
-                 return rv
-         if WIN:
-             return "notepad"
-         for editor in "sensible-editor", "vim", "nano":
-             if os.system(f"which {editor} >/dev/null 2>&1") == 0:
-                 return editor
-         return "vi"
-
-     def edit_file(self, filename: str) -> None:
-         import subprocess
-
-         editor = self.get_editor()
-         environ: t.Optional[t.Dict[str, str]] = None
-
-         if self.env:
-             environ = os.environ.copy()
-             environ.update(self.env)
-
-         try:
-             c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True)
-             exit_code = c.wait()
-             if exit_code != 0:
-                 raise ClickException(
-                     _("{editor}: Editing failed").format(editor=editor)
-                 )
-         except OSError as e:
-             raise ClickException(
-                 _("{editor}: Editing failed: {e}").format(editor=editor, e=e)
-             ) from e
-
-     def edit(self, text: t.Optional[t.AnyStr]) -> t.Optional[t.AnyStr]:
-         import tempfile
-
-         if not text:
-             data = b""
-         elif isinstance(text, (bytes, bytearray)):
-             data = text
-         else:
-             if text and not text.endswith("\n"):
-                 text += "\n"
-
-             if WIN:
-                 data = text.replace("\n", "\r\n").encode("utf-8-sig")
-             else:
-                 data = text.encode("utf-8")
-
-         fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
-         f: t.BinaryIO
-
-         try:
-             with os.fdopen(fd, "wb") as f:
-                 f.write(data)
-
-             # If the filesystem resolution is 1 second, like Mac OS
-             # 10.12 Extended, or 2 seconds, like FAT32, and the editor
-             # closes very fast, require_save can fail. Set the modified
-             # time to be 2 seconds in the past to work around this.
-             os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))
-             # Depending on the resolution, the exact value might not be
-             # recorded, so get the new recorded value.
-             timestamp = os.path.getmtime(name)
-
-             self.edit_file(name)
-
-             if self.require_save and os.path.getmtime(name) == timestamp:
-                 return None
-
-             with open(name, "rb") as f:
-                 rv = f.read()
-
-             if isinstance(text, (bytes, bytearray)):
-                 return rv
-
-             return rv.decode("utf-8-sig").replace("\r\n", "\n")  # type: ignore
-         finally:
-             os.unlink(name)
-
-
- def open_url(url: str, wait: bool = False, locate: bool = False) -> int:
-     import subprocess
-
-     def _unquote_file(url: str) -> str:
-         from urllib.parse import unquote
-
-         if url.startswith("file://"):
-             url = unquote(url[7:])
-
-         return url
-
-     if sys.platform == "darwin":
-         args = ["open"]
-         if wait:
-             args.append("-W")
-         if locate:
-             args.append("-R")
-         args.append(_unquote_file(url))
-         null = open("/dev/null", "w")
-         try:
-             return subprocess.Popen(args, stderr=null).wait()
-         finally:
-             null.close()
-     elif WIN:
-         if locate:
-             url = _unquote_file(url.replace('"', ""))
-             args = f'explorer /select,"{url}"'
-         else:
-             url = url.replace('"', "")
-             wait_str = "/WAIT" if wait else ""
-             args = f'start {wait_str} "" "{url}"'
-         return os.system(args)
-     elif CYGWIN:
-         if locate:
-             url = os.path.dirname(_unquote_file(url).replace('"', ""))
-             args = f'cygstart "{url}"'
-         else:
-             url = url.replace('"', "")
-             wait_str = "-w" if wait else ""
-             args = f'cygstart {wait_str} "{url}"'
-         return os.system(args)
-
-     try:
-         if locate:
-             url = os.path.dirname(_unquote_file(url)) or "."
-         else:
-             url = _unquote_file(url)
-         c = subprocess.Popen(["xdg-open", url])
-         if wait:
-             return c.wait()
-         return 0
-     except OSError:
-         if url.startswith(("http://", "https://")) and not locate and not wait:
-             import webbrowser
-
-             webbrowser.open(url)
-             return 0
-         return 1
-
-
- def _translate_ch_to_exc(ch: str) -> t.Optional[BaseException]:
-     if ch == "\x03":
-         raise KeyboardInterrupt()
-
-     if ch == "\x04" and not WIN:  # Unix-like, Ctrl+D
-         raise EOFError()
-
-     if ch == "\x1a" and WIN:  # Windows, Ctrl+Z
-         raise EOFError()
-
-     return None
-
-
- if WIN:
-     import msvcrt
-
-     @contextlib.contextmanager
-     def raw_terminal() -> t.Iterator[int]:
-         yield -1
-
-     def getchar(echo: bool) -> str:
-         # The function `getch` will return a bytes object corresponding to
-         # the pressed character. Since Windows 10 build 1803, it will also
-         # return \x00 when called a second time after pressing a regular key.
-         #
-         # `getwch` does not share this probably-bugged behavior. Moreover, it
-         # returns a Unicode object by default, which is what we want.
-         #
-         # Either of these functions will return \x00 or \xe0 to indicate
-         # a special key, and you need to call the same function again to get
-         # the "rest" of the code. The fun part is that \u00e0 is
-         # "latin small letter a with grave", so if you type that on a French
-         # keyboard, you _also_ get a \xe0.
-         # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
-         # resulting Unicode string reads as "a with grave" + "capital H".
-         # This is indistinguishable from when the user actually types
-         # "a with grave" and then "capital H".
-         #
-         # When \xe0 is returned, we assume it's part of a special-key sequence
-         # and call `getwch` again, but that means that when the user types
-         # the \u00e0 character, `getchar` doesn't return until a second
-         # character is typed.
-         # The alternative is returning immediately, but that would mess up
-         # cross-platform handling of arrow keys and others that start with
-         # \xe0. Another option is using `getch`, but then we can't reliably
-         # read non-ASCII characters, because return values of `getch` are
-         # limited to the current 8-bit codepage.
-         #
-         # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
-         # is doing the right thing in more situations than with `getch`.
-         func: t.Callable[[], str]
-
-         if echo:
-             func = msvcrt.getwche  # type: ignore
-         else:
-             func = msvcrt.getwch  # type: ignore
-
-         rv = func()
-
-         if rv in ("\x00", "\xe0"):
-             # \x00 and \xe0 are control characters that indicate special key,
-             # see above.
-             rv += func()
-
-         _translate_ch_to_exc(rv)
-         return rv
-
- else:
-     import tty
-     import termios
-
-     @contextlib.contextmanager
-     def raw_terminal() -> t.Iterator[int]:
-         f: t.Optional[t.TextIO]
-         fd: int
-
-         if not isatty(sys.stdin):
-             f = open("/dev/tty")
-             fd = f.fileno()
-         else:
-             fd = sys.stdin.fileno()
-             f = None
-
-         try:
-             old_settings = termios.tcgetattr(fd)
-
-             try:
-                 tty.setraw(fd)
-                 yield fd
-             finally:
-                 termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
-                 sys.stdout.flush()
-
-                 if f is not None:
-                     f.close()
-         except termios.error:
-             pass
-
-     def getchar(echo: bool) -> str:
-         with raw_terminal() as fd:
-             ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace")
-
-             if echo and isatty(sys.stdout):
-                 sys.stdout.write(ch)
-
-         _translate_ch_to_exc(ch)
-         return ch
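
For context, this module backs Click's public helpers; `click.progressbar` constructs the `ProgressBar` class above, so a typical call looks like this (a minimal sketch):

```python
import time

import click

with click.progressbar(range(100), label="Processing") as bar:
    for _ in bar:
        time.sleep(0.01)  # the bar re-renders after each yielded item
```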