parquet-converter committed
Commit 5cb8a2b · 1 Parent(s): d9564f5

Update parquet files (step 3 of 397)

This view is limited to 50 files because it contains too many changes. See the raw diff for the remaining changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download free serato skin for virtual dj The ultimate guide for beginners.md +0 -148
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2019 Crashing The Causes and The Solutions.md +0 -26
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Version Of Corel Draw LINK.md +0 -26
  4. spaces/1gistliPinn/ChatGPT4/Examples/Ashrae Standard 170 Pdf 17l How to Download and Apply the Latest Addendum.md +0 -15
  5. spaces/1gistliPinn/ChatGPT4/Examples/Disk Drill Pro 3.6.918 Crack Activation Code ((FREE)) Free Download 2019.md +0 -29
  6. spaces/1gistliPinn/ChatGPT4/Examples/Drive Club Pc Game Download Kickass 61.md +0 -11
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dirt Rally 2.0 Apk The Best Mods and Add-Ons for More Fun and Challenge.md +0 -121
  8. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_heun_discrete.py +0 -254
  9. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_vq_diffusion.py +0 -496
  10. spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker.py +0 -40
  11. spaces/4th3n4/TraDeX/app.py +0 -590
  12. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/indexed_datasets.py +0 -77
  13. spaces/AISuperheroes/03GR-Chatbot-Memory/README.md +0 -13
  14. spaces/ASJMO/freegpt/g4f/Provider/Provider.py +0 -16
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py +0 -19
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192.py +0 -172
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_mixup.py +0 -17
  18. spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Theb.py +0 -97
  19. spaces/AchyuthGamer/text-to-speech-client/assets/index-5644c887.css +0 -1
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/AddChildrenMap.js +0 -6
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/TableSetInteractive.js +0 -19
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/SimpleDropDownList.js +0 -27
  23. spaces/Alashazam/Harmony/app.py +0 -45
  24. spaces/AlhitawiMohammed22/HTD_HTR/app.py +0 -145
  25. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py +0 -0
  26. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models_face.py +0 -819
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/paradigms.md +0 -54
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md +0 -56
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/reinforcement_learning/run_diffuser_locomotion.py +0 -59
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -935
  31. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_dependencies.py +0 -39
  32. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py +0 -13
  33. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py +0 -2
  34. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/shared.py +0 -275
  35. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/utils.py +0 -20
  36. spaces/Anonymous-sub/Rerender/ControlNet/gradio_hed2image.py +0 -98
  37. spaces/Ariharasudhan/YoloV5/utils/aws/__init__.py +0 -0
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/intranges.py +0 -54
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/monkey.py +0 -165
  40. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py +0 -116
  41. spaces/AzinZ/vitscn/preprocess.py +0 -25
  42. spaces/AzumaSeren100/XuanShen-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md +0 -57
  43. spaces/Bambicita/rvc-models/infer_pack/attentions.py +0 -417
  44. spaces/Benson/text-generation/Examples/2.0tamil Pelcula Descargar.md +0 -157
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/unix.py +0 -194
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py +0 -802
  47. spaces/Bijoy2001/real-time-voice-recognition/app.py +0 -20
  48. spaces/BilalSardar/YoutubeVideoLink-To-MCQs-Generation/app.py +0 -320
  49. spaces/BridgeTower/bridgetower-video-search/README.md +0 -12
  50. spaces/CVPR/LIVE/pybind11/tests/test_tagbased_polymorphic.cpp +0 -142
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download free serato skin for virtual dj The ultimate guide for beginners.md DELETED
@@ -1,148 +0,0 @@
-
- <h1>How to Download Free Serato Skin for Virtual DJ</h1>
- <p>If you are a fan of <strong>Virtual DJ</strong>, one of the most popular and versatile DJ software in the market, you might be interested in changing its look and feel with a different skin. A skin is a graphical interface that modifies the appearance and layout of Virtual DJ, giving it a new style and functionality.</p>
- <h2>download free serato skin for virtual dj</h2><br /><p><b><b>Download File</b> ===== <a href="https://byltly.com/2uKzUm">https://byltly.com/2uKzUm</a></b></p><br /><br />
- <p>One of the most sought-after skins for Virtual DJ is the <strong>Serato Skin</strong>, which mimics the design and features of <strong>Serato DJ Pro</strong>, another leading DJ software that is widely used by professional DJs. The Serato Skin for Virtual DJ gives you the best of both worlds, combining the power and flexibility of Virtual DJ with the sleek and intuitive interface of Serato DJ Pro.</p>
- <p>In this article, we will show you how to download free Serato Skin for Virtual DJ, how to install it on your computer, and how to use it to enhance your mixing and scratching skills. By following these simple steps, you will be able to transform your Virtual DJ into a Serato-like experience that will impress your audience and yourself.</p>
- <h2>How to Download Serato Skin for Virtual DJ</h2>
- <p>The first step to get Serato Skin for Virtual DJ is to find a reliable source where you can download it safely and legally. There are many websites that offer free downloads of skins for Virtual DJ, but not all of them are trustworthy or updated. Some of them may contain viruses, malware, or broken links that can harm your computer or compromise your privacy.</p>
- <p>One of the websites that we recommend for downloading free Serato Skin for Virtual DJ is <a href="https://www.djsonatty.com/">Sonatty</a>, a blog that provides useful information and resources for DJs. Sonatty has several versions of Serato Skin for Virtual DJ available, including Serato DJ Pro 2.5, Serato DJ Pro 2.0, and more. You can also find other skins, plugins, effects, samples, and tutorials on Sonatty that can help you improve your performance as a DJ.</p>
- <h3>Step 1: Find a reliable source for downloading Serato Skin for Virtual DJ</h3>
- <p>To download free Serato Skin for Virtual DJ from Sonatty, you need to visit their website and navigate to the <a href="https://www.djsonatty.com/search/label/Plugins">Plugins</a> section. There you will see a list of posts that contain links to different skins, plugins, and effects for Virtual DJ. Look for the post that matches the version of Serato Skin for Virtual DJ that you want to download.</p>
- <h3>Step 2: Choose the version of Serato Skin for Virtual DJ that suits your needs</h3>
- <p>Depending on your preference and compatibility, you can choose between different versions of Serato Skin for Virtual DJ that have different features and requirements. For example, if you have <strong>Virtual DJ 2021</strong>, you can download <strong>Serato DJ Pro 2.5</strong>, which is the latest version of Serato Skin that has a premium edition with more options and functions. If you have <strong>Virtual DJ 2018 or 2020</strong>, you can download <strong>Serato DJ Pro 2.0</strong>, which is an older version of Serato Skin that still works well with these versions of Virtual DJ. You can also find other versions of Serato Skin on Sonatty or other websites if you have different versions of Virtual DJ.</p>
- <h3>Step 3: Download and extract the Serato Skin for Virtual DJ file</h3>
- <p>Once you have chosen the version of Serato Skin for Virtual DJ that you want to download, click on the <strong>[Download]</strong> button on the post that contains it. This will take you to another page where you will see a link to download the file from Google Drive. Click on the link and then click on <strong>Download anyway</strong> to start downloading the file.</p>
- <p>The file will be in a compressed format (.zip or .rar) that you need to extract using a program like WinRAR or WinZip. To extract the file, right-click on it and select <strong>Extract here</strong> or <strong>Extract to...</strong>. This will create a folder with the same name as the file that contains the skin file (.zip) and some instructions (.txt).</p>
- <p>How to get serato skin for virtual dj without paying<br />
- Serato skin for virtual dj 8 pro free download<br />
- Best serato skin for virtual dj 7 download link<br />
- Where to find serato skin for virtual dj 2021<br />
- Serato skin for virtual dj zip file download<br />
- Serato skin for virtual dj mac free download<br />
- Serato skin for virtual dj windows 10 download<br />
- Serato scratch live skin for virtual dj free download<br />
- Serato dj pro skin for virtual dj free download<br />
- Serato video skin for virtual dj free download<br />
- Serato sl3 skin for virtual dj free download<br />
- Serato sl4 skin for virtual dj free download<br />
- Serato rane 62 skin for virtual dj free download<br />
- Serato rane 57 skin for virtual dj free download<br />
- Serato rane 12 skin for virtual dj free download<br />
- Serato rane 72 skin for virtual dj free download<br />
- Serato pioneer ddj sx2 skin for virtual dj free download<br />
- Serato pioneer ddj sx3 skin for virtual dj free download<br />
- Serato pioneer ddj sz2 skin for virtual dj free download<br />
- Serato pioneer ddj sz3 skin for virtual dj free download<br />
- Serato pioneer ddj sb3 skin for virtual dj free download<br />
- Serato pioneer ddj sr2 skin for virtual dj free download<br />
- Serato pioneer ddj sr3 skin for virtual dj free download<br />
- Serato pioneer ddj 1000srt skin for virtual dj free download<br />
- Serato pioneer ddj 800srt skin for virtual dj free download<br />
- Serato numark mixtrack pro 3 skin for virtual dj free download<br />
- Serato numark mixtrack platinum fx skin for virtual dj free download<br />
- Serato numark ns6ii skin for virtual dj free download<br />
- Serato numark nvii skin for virtual dj free download<br />
- Serato denon mcx8000 skin for virtual dj free download<br />
- Serato denon mc7000 skin for virtual dj free download<br />
- Serato denon prime 4 skin for virtual dj free download<br />
- Serato denon prime 2 skin for virtual dj free download<br />
- Serato reloop mixon 4 skin for virtual dj free download<br />
- Serato reloop beatpad 2 skin for virtual dj free download<br />
- Serato reloop touch skin for virtual dj free download<br />
- Serato reloop elite skin for virtual dj free download<br />
- Serato hercules inpulse 500 skin for virtual dj free download<br />
- Serato hercules inpulse 300 skin for virtual dj free download<br />
- Serato hercules inpulse 200 skin for virtual dj free download<br />
- Serato hercules starlight skin for virtual dj free download<br />
- Serato hercules jogvision skin for virtual dj free download<br />
- Serato traktor kontrol s4 mk3 skin for virtual dj free download<br />
- Serato traktor kontrol s2 mk3 skin for virtual dj free download<br />
- Serato traktor kontrol s8 mk2 skin for virtual dj free download<br />
- Serato traktor kontrol z2 mk2 skin for virtual dj free download<br />
- Free serato skins pack for all versions of virtual dj <br />
- How to install serato skins on your computer and use them with virtual dj <br />
- How to customize your own serato skins and share them with other users of virtual dj <br />
- How to troubleshoot common issues with serato skins and fix them on your system</p>
- <h2>How to Install Serato Skin for Virtual DJ</h2>
- <p>The next step to get Serato Skin for Virtual DJ is to install it on your computer so that you can use it with your Virtual DJ software. This is a very easy process that only requires copying and pasting one file into one folder.</p>
- <h3>Step 1: Locate the Skin folder in your Virtual DJ directory</h3>
- <p>To install Serato Skin for Virtual DJ, you need to find where your <strong>Skin folder</strong> is located in your <strong>Virtual DJ directory</strong>. The default location of this folder is usually <code>C:\Users\YourName\Documents\VirtualDJ\Skins</code>, but it may vary depending on how you installed your software or what version you have.</p>
- <p>To find your skin folder easily, open your <strong>VirtualDJ software</strong> and go to <strong>Settings > Interface > Skins > Open Folder</strong>. This will open your skin folder in a new window where you can see all the skins that you have installed or available.</p>
- <h3>Step 2: Copy and paste the Serato Skin for VirtualDJ file into the skin folder</h3>
- <p>To install Serato Skin for VirtualDJ, you need to copy and paste one file into your skin folder. The file is called <code>Seratovdj.zip</code>, which is located inside the folder that you extracted from Sonatty's website (e.g., <code>Seratovdj2020.zip</code>). To copy this file, right-click on it and select <strong>Copy</strong>. Then go back to your skin folder window and right-click on an empty space and select <strong>Paste</strong>. This will add this file into your skin folder along with other skins.</p>
- <h3>Step 3: Open your virtualDJ software and select seratovdj from interface settings </h3>
- <p>To use seratovdj skin with virtualDJ software ,you need open virtualDJ software then go settings > interface > skins > seratovdj .This will change look virtualDJ software like seratodj pro .You can also switch between different skins anytime by repeating this process . </p>
- <h2>How to Use seratovdj skin with virtualDJ </h2>
- <p>The final step to get seratovdj skin with virtualDJ is enjoy mixing scratching skills .seratovdj skin gives best both worlds ,combining power flexibility virtualDJ sleek intuitive interface seratodj pro .You can explore features functions seratovdj skin customize according preferences .Here some tips tricks use seratovdj skin virtualDJ :</p>
- <h3>Step 1: Explore the features and functions of seratovdj skin for virtualDJ</h3>
- <p>seratovdj skin for virtualDJ has many features and functions that mimic the design and features of seratodj pro. Some of the main features and functions are:</p>
- <ul>
- <li><strong>Waveforms</strong>: seratovdj skin for virtualDJ displays the waveforms of the tracks that you are playing or loading in different colors and shapes. You can zoom in or out of the waveforms, adjust their brightness and contrast, and sync them with the beatgrid. You can also see the cue points, loops, and effects on the waveforms.</li>
- <li><strong>Decks</strong>: seratovdj skin for virtualDJ has two or four decks that you can use to mix and scratch your tracks. You can switch between the decks by clicking on the deck number or using a keyboard shortcut. You can also see the track information, BPM, pitch, time, key, and mode on each deck.</li>
- <li><strong>Mixer</strong>: seratovdj skin for virtualDJ has a mixer that allows you to control the volume, gain, EQ, filter, crossfader, and headphone cue of each deck. You can also use the mixer to apply effects, samples, and loops to your tracks.</li>
- <li><strong>Library</strong>: seratovdj skin for virtualDJ has a library that lets you browse and load your tracks from your computer or external devices. You can also search for tracks by name, artist, genre, BPM, key, or color. You can also create and manage playlists, crates, smart crates, and history.</li>
- <li><strong>Effects</strong>: seratovdj skin for virtualDJ has a variety of effects that you can use to spice up your mixes. You can choose from echo, flanger, phaser, reverb, delay, filter, gater, slicer, and more. You can also adjust the parameters of each effect and apply them to individual decks or to the master output.</li>
- <li><strong>Samples</strong>: seratovdj skin for virtualDJ has a sample player that lets you trigger sounds from your computer or external devices. You can load up to 32 samples in 8 banks and assign them to different pads. You can also adjust the volume, pitch, loop mode, and sync mode of each sample.</li>
- <li><strong>Loops</strong>: seratovdj skin for virtualDJ has a loop function that lets you create and manipulate loops on your tracks. You can set the loop length manually or automatically using beatjump or snap mode. You can also save and recall loops using hot cues or memory cues.</li>
- <li><strong>Cues</strong>: seratovdj skin for virtualDJ has a cue function that lets you mark and jump to specific points on your tracks. You can set up to 8 hot cues per deck and trigger them using pads or keyboard shortcuts. You can also set memory cues that are saved with your tracks and visible on the waveforms.</li>
- </ul>
- <h3>Step 2: Customize the seratovdj skin for virtualDJ according to your preferences</h3>
- <p>seratovdj skin for virtualDJ is highly customizable and allows you to change its appearance and behavior according to your preferences. You can access the customization options by clicking on the <strong>Settings</strong> button on the top right corner of the skin. Some of the customization options are:</p>
- <ul>
- <li><strong>Skin Layout</strong>: You can choose between different layouts for the skin, such as 2 Decks Horizontal Waveform (default), 2 Decks Vertical Waveform, 4 Decks Horizontal Waveform, 4 Decks Vertical Waveform, etc.</li>
- <li><strong>Skin Color</strong>: You can choose between different colors for the skin, such as Blue (default), Red, Green, Purple, etc.</li>
- <li><strong>Skin Mode</strong>: You can choose between different modes for the skin, such as Performance (default), Library Only (for browsing tracks), Video (for mixing videos), etc.</li>
- <li><strong>Waveform Color</strong>: You can choose between different colors for the waveforms, such as RGB (default), Mono (white), Inverted (black), etc.</li>
- <li><strong>Waveform Shape</strong>: You can choose between different shapes for the waveforms, such as Filled (default), Outline (transparent), Dots (dots), etc.</li>
- <li><strong>Waveform Zoom</strong>: You can adjust the zoom level of the waveforms using a slider or a keyboard shortcut.</li>
- <li><strong>Brightness/Contrast</strong>: You can adjust the brightness and contrast of the waveforms using sliders or keyboard shortcuts.</li>
- <li><strong>Crossfader Curve</strong>: You can adjust the curve of the crossfader using a slider or a keyboard shortcut.</li>
- <li><strong>Pitch Range</strong>: You can adjust the pitch range of each deck using a slider or a keyboard shortcut.</li>
- <li><strong>Pitch Lock</strong>: You can lock or unlock the pitch of each deck using a button or a keyboard shortcut.</li>
- <li><strong>Key Lock</strong>: You can lock or unlock the key of each deck using a button or a keyboard shortcut.</li>
- <li><strong>Sync Mode</strong>: You can choose between different sync modes for each deck using a button or a keyboard shortcut.</li>
- <li><strong>Quantize Mode</strong>: You can enable or disable quantize mode for each deck using a button or a keyboard shortcut.</li>
- <li><strong>Slip Mode</strong>: You can enable or disable slip mode for each deck using a button or a keyboard shortcut.</li>
- <li><strong>Vinyl Mode</strong>: You can enable or disable vinyl mode for each deck using a button or a keyboard shortcut.</li>
- <li><strong>MIDI Mapping</strong>: You can map any function of seratovdj skin for virtualDJ to any MIDI controller using a button or a keyboard shortcut.</li>
- <li><strong>Keyboard Mapping</strong>: You can map any function of seratovdj skin for virtualDJ to any key on your keyboard using a button or a keyboard shortcut.</li>
- <li><strong>Skin Options</strong>: You can enable or disable various options for seratovdj skin for virtualDJ , such as Show/Hide Browser Panel , Show/Hide Mixer Panel , Show/Hide Effects Panel , Show/Hide Samples Panel , Show/Hide Loops Panel , Show/Hide Cues Panel , etc.</li>
- </ul>
- <h3>Step 3: Enjoy mixing and scratching with seratovdj skin for virtualDJ </h3>
- <p>The last step to get seratovdj skin for virtualDJ is to enjoy mixing and scratching with it. seratovdj skin for virtualDJ gives you all the tools and features that you need to create amazing mixes and scratches that will impress your audience and yourself. Whether you are a beginner or an expert DJ ,seratovdj skin for virtualDJ will help you unleash your creativity and have fun with your music . </p>
- <h2>Conclusion </h2>
- <p>In this article ,we showed you how to download free seratovdj skin for virtualDJ ,how to install it on your computer ,and how to use it to enhance your mixing and scratching skills .By following these simple steps ,you will be able to transform your virtualDJ into a serato-like experience that will impress your audience and yourself . </p>
- <p>Serato Skin for Virtual DJ is one of the most popular and versatile skins for Virtual DJ that mimics the design and features of Serato DJ Pro .It gives you the best of both worlds ,combining the power and flexibility of Virtual DJ with the sleek and intuitive interface of Serato DJ Pro .You can explore its features and functions ,customize it according to your preferences ,and enjoy mixing and scratching with it . </p>
- <p>If you are looking for a new way to spice up your Virtual DJ software ,we highly recommend you to try Serato Skin for Virtual DJ .You will not regret it .It is free ,easy ,and fun .What are you waiting for ?Download Serato Skin for Virtual DJ today and start mixing like a pro . </p>
- <h2>Frequently Asked Questions </h2>
- <ul>
- <li><p><b>Q: Where can I download free Serato Skin for Virtual DJ ? </b></p></li>
- <p>A: One of the websites that we recommend for downloading free Serato Skin for Virtual DJ is Sonatty ,a blog that provides useful information and resources for DJs .Sonatty has several versions of Serato Skin for Virtual DJ available ,including Serato DJ Pro 2.5 ,Serato DJ Pro 2.0 ,and more .You can also find other skins ,plugins ,effects ,samples ,and tutorials on Sonatty that can help you improve your performance as a DJ . </p>
- <li><p><b>Q: How do I install Serato Skin for Virtual DJ ? </b></p></li>
- <p>atty's website (e.g., Seratovdj2020.zip). To copy this file, right-click on it and select Copy. Then go to your Skin folder window and right-click on an empty space and select Paste. This will add this file into your Skin folder along with other skins.</p>
- <li><p><b>Q: How do I use Serato Skin for Virtual DJ ? </b></p></li>
- <p>A: To use Serato Skin for Virtual DJ ,you need to open your Virtual DJ software and select the Serato Skin from the interface settings. You can also switch between different skins anytime by repeating this process. To use Serato Skin for Virtual DJ ,you can explore its features and functions ,customize it according to your preferences ,and enjoy mixing and scratching with it .</p>
- <li><p><b>Q: What are the benefits of using Serato Skin for Virtual DJ ? </b></p></li>
- <p>A: The benefits of using Serato Skin for Virtual DJ are:</p>
- <ul>
- <li>It gives you a new look and feel for your Virtual DJ software that mimics the design and features of Serato DJ Pro.</li>
- <li>It combines the power and flexibility of Virtual DJ with the sleek and intuitive interface of Serato DJ Pro.</li>
- <li>It allows you to access and use many features and functions that are available in Serato DJ Pro ,such as waveforms ,decks ,mixer ,library ,effects ,samples ,loops ,cues ,etc.</li>
- <li>It is highly customizable and allows you to change its appearance and behavior according to your preferences.</li>
- <li>It is free ,easy ,and fun to use.</li>
- </ul>
- <li><p><b>Q: What are the requirements for installing Serato Skin for Virtual DJ ? </b></p></li>
- <p>A: The requirements for installing Serato Skin for Virtual DJ are:</p>
- <ul>
- <li>You need to have a computer that meets the minimum system requirements for running Virtual DJ software.</li>
- <li>You need to have a version of Virtual DJ software that is compatible with the version of Serato Skin for Virtual DJ that you want to download.</li>
- <li>You need to have a program that can extract compressed files (e.g., WinRAR or WinZip).</li>
- <li>You need to have an internet connection that can download files from Google Drive or other websites.</li>
- </ul>
- <li><p><b>Q: Is Serato Skin for Virtual DJ legal ? </b></p></li>
- <p>A: Serato Skin for Virtual DJ is legal as long as you download it from a reliable source that has permission from the original creators of Serato DJ Pro .You should not download or use any skin that infringes the intellectual property rights of Serato or any other company .You should also not use any skin that contains viruses ,malware ,or broken links that can harm your computer or compromise your privacy .</p>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2019 Crashing The Causes and The Solutions.md DELETED
@@ -1,26 +0,0 @@
-
- <h1>How to Fix Excel 2019 Crashing Issues</h1>
- <p>Excel 2019 is the latest version of the popular spreadsheet software from Microsoft. It offers many new features and improvements, such as new functions, charts, data types, and more. However, some users have reported that Excel 2019 crashes frequently or unexpectedly on their computers. This can be very frustrating and annoying, especially if you are working on important or complex documents.</p>
- <h2>excel 2019 crashing</h2><br /><p><b><b>Download Zip</b> &#10042;&#10042;&#10042; <a href="https://byltly.com/2uKxNS">https://byltly.com/2uKxNS</a></b></p><br /><br />
- <p>Fortunately, there are some possible solutions that can help you fix Excel 2019 crashing issues and prevent them from happening again. In this article, we will show you some of the most common causes of Excel 2019 crashing and how to troubleshoot them. We will also give you some tips on how to optimize your Excel 2019 performance and avoid any errors.</p>
- <h2>What Causes Excel 2019 Crashing?</h2>
- <p>Excel 2019 crashing can be caused by various factors, such as:</p>
- <ul>
- <li>Corrupted or incompatible add-ins. Add-ins are extensions that enhance the functionality of Excel. However, some add-ins might not work well with Excel 2019 or might be corrupted or outdated. This can cause Excel 2019 to crash or freeze when you try to use them.</li>
- <li>Corrupted or damaged files. If your Excel files are corrupted or damaged due to virus infection, power outage, improper shutdown, or other reasons, they might cause Excel 2019 to crash when you try to open or save them.</li>
- <li>Insufficient memory or disk space. If your computer does not have enough memory or disk space to run Excel 2019 smoothly, it might cause Excel 2019 to crash or slow down.</li>
- <li>Outdated or incompatible drivers. Drivers are software that enable your computer to communicate with your hardware devices, such as printer, scanner, mouse, keyboard, etc. If your drivers are outdated or incompatible with Excel 2019, they might cause Excel 2019 to crash or malfunction.</li>
- <li>Software conflicts. If you have other software running in the background that interfere with Excel 2019, such as antivirus, firewall, VPN, etc., they might cause Excel 2019 to crash or behave erratically.</li>
- </ul>
- <p>To fix Excel 2019 crashing issues, you need to identify and resolve the underlying issue that is causing them. You can do this manually by following the steps in the next section. However, this can be time-consuming and complicated, especially if you are not familiar with the technical aspects of your computer.</p>
- <p>That's why using a professional tool like <a href="https://www.repairtoolbox.com/excelrepair.html">Excel Repair Toolbox</a> is a better option. This tool can automatically scan your computer and detect the cause of Excel 2019 crashing issues. It can also repair any errors and optimize your Excel 2019 performance.</p>
- <p></p>
- <h2>How to Troubleshoot Excel 2019 Crashing Issues?</h2>
- <p>If you want to troubleshoot Excel 2019 crashing issues manually, you can follow these steps:</p>
- <ol>
- <li>Disable or remove any add-ins that might be causing problems. To do this, open Excel 2019 and go to File > Options > Add-Ins. In the Manage drop-down list, select COM Add-ins and click Go. Uncheck any add-ins that you don't need or use and click OK. Restart Excel 2019 and see if the problem persists. If it does, repeat the same steps for other types of add-ins, such as Excel Add-ins, Analysis ToolPak, etc.</li>
- <li>Repair or recover any corrupted or damaged files. To do this, open Excel 2019 and go to File > Open. Locate the file that you want to repair and click on the arrow next to the Open button. Select Open and Repair from the menu and choose either Repair or Extract Data depending on the severity of the corruption. Follow the instructions on the screen to complete the process.</li>
- <li>Free up some memory or disk space on your computer. To do this, close any unnecessary programs or tabs that are running in the background. You can also use a disk cleanup tool like CCleaner to delete any temporary files, cache files, cookies, etc. that might be taking up space on your hard drive.</li>
- <li>Update or reinstall any drivers that might be outdated or incompatible with Excel 2019. To do this, go to Device Manager on your computer and look for any devices that</p> ddb901b051<br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Version Of Corel Draw LINK.md DELETED
@@ -1,26 +0,0 @@
-
- <h1>How to Get a Free Version of CorelDRAW</h1>
- <p>CorelDRAW is a popular graphic design software that allows you to create vector illustrations, page layouts, photo editing, typography, and more. However, CorelDRAW is not a cheap software and may not be affordable for everyone. If you are looking for a free version of CorelDRAW, you have a few options to consider.</p>
- <h2>Option 1: Download a Free Trial of CorelDRAW Graphics Suite</h2>
- <p>The easiest way to get a free version of CorelDRAW is to download a free trial of CorelDRAW Graphics Suite from the official website. The free trial gives you full access to all the features and content that come with a CorelDRAW Graphics Suite subscription for 15 days. You can use the free trial to explore the software and create your own projects without any limitations. However, after 15 days, you will need to purchase a subscription or a one-time license to continue using the software.</p>
- <h2>free version of corel draw</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://byltly.com/2uKwmJ">https://byltly.com/2uKwmJ</a></b></p><br /><br />
- <p>To download the free trial, you need to visit <a href="https://www.coreldraw.com/en/pages/free-download/">this page</a> and click on the "Download Now" button. You will need to enter your name and email address and agree to the terms and conditions. Then, you will receive an email with a download link and instructions on how to install and activate the software. You can also access the online help, tutorials, and resources from the website to learn how to use the software.</p>
- <h2>Option 2: Use CorelDRAW.app Online or on iPad</h2>
- <p>Another way to get a free version of CorelDRAW is to use CorelDRAW.app, which is an online or iPad app that lets you create vector illustrations and graphic designs via web browser or tablet. CorelDRAW.app is included as part of the CorelDRAW Graphics Suite subscription, but you can also use it for free with some limitations. The free version of CorelDRAW.app allows you to create up to 5 projects and save them in the cloud. You can also export your projects as PNG or JPEG files.</p>
- <p>To use CorelDRAW.app online, you need to visit <a href="https://www.coreldraw.app/en/">this page</a> and sign up for a free account. You can also sign in with your existing Corel account if you have one. Then, you can start creating your projects using the online interface and tools. You can also access the online help, tutorials, and resources from the website to learn how to use the app.</p>
- <p>To use CorelDRAW.app on iPad, you need to download the app from the App Store and sign in with your free or paid Corel account. Then, you can start creating your projects using the iPad interface and tools. You can also access the online help, tutorials, and resources from the app to learn how to use it.</p>
- <h2>Option 3: Use an Alternative Graphic Design Software</h2>
- <p>The third way to get a free version of CorelDRAW is to use an alternative graphic design software that offers similar or better features and functionality. There are many free or low-cost graphic design software available on the market that can help you create vector illustrations, page layouts, photo editing, typography, and more. Some of these software are:</p>
- <ul>
- <li>Inkscape: A free and open-source vector graphics editor that supports SVG format and has many tools and features similar to CorelDRAW.</li>
- <li>GIMP: A free and open-source image editor that supports various formats and has many tools and features similar to Corel PHOTO-PAINT.</li>
- <li>Scribus: A free and open-source desktop publishing software that supports various formats and has many tools and features similar to CorelDRAW's page layout capabilities.</li>
- <li>Gravit Designer: A free online or desktop vector graphics editor that supports various formats and has many tools and features similar to CorelDRAW.</li>
- <li>Krita: A free and open-source digital painting software that supports various formats and has many tools and features similar to Corel PHOTO-PAINT.</li>
- </ul>
- <p>To use these alternative graphic design software, you need to visit their respective websites and download or access them online. You can also find online help, tutorials, and resources from their websites or communities to learn how to use them.</p>
- <h3>Conclusion</h3>
- <p>CorelDRAW is a powerful graphic design</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Ashrae Standard 170 Pdf 17l How to Download and Apply the Latest Addendum.md DELETED
@@ -1,15 +0,0 @@
- <br />
- <p>ANSI/ASHRAE/ASHE Standard 170 offers guidance, regulation and mandates to designers and operators of health care facilities. The revised 2021 edition incorporates 17 addenda to the 2017 standard. The updated standard includes the following significant changes: Revised scope, with improved guidance on thermal comfort conditions provided; Extensive modifications to address the Outpatient and Residential sections; Addition of a new outpatient ventilation table to address non-acute-type spaces; Extensive revisions to air filtration requirements; Addition of new columns in the ventilation tables to prescribe filtration requirement and designate unoccupied turndown; Expanded guidance on separation distance requirements for varied intake and exhaust arrangements, coordinating with related ASHRAE Standard 62.1 data;</p>
- <h2>Ashrae Standard 170 Pdf 17l</h2><br /><p><b><b>Download</b> &gt; <a href="https://imgfil.com/2uy27f">https://imgfil.com/2uy27f</a></b></p><br /><br />
- <p>HVAC systems are built to keep the indoor air quality (IAQ) safe for patients. Because of the airflow standards, equipment must meet high ventilation rates and filtration requirements. Healthcare facilities serve a critical service, and they must consider several factors to provide adequate public health. Among the concerns for health care facilities is airflow ventilation.</p>
- <p>In healthcare facilities, humidifiers prevent the spread of bacteria and viruses. The standard's (ANSI/ASHRAE/ASHE 170) ventilation system addresses temperature and humidity that could be compromised without the proper attention and care.</p>
- <p>Ophthalmology is already one of the busiest outpatient specialties in healthcare. Each patient's journey includes several healthcare personnel interacting to undertake routine objective assessments which is often followed by specialized imaging. The clinical consultation can take an average of 8 min and includes a close proximity slit-lamp examination to systematically inspect the eye and its adnexa. During the Wuhan outbreak of COVID-19, nosocomial transmission was reported to be highest in ENT and Ophthalmology.[17] The standard high-volume practice observed in ophthalmic units is therefore very high-risk and cannot be underestimated in subjecting staff and patients to contracting SARS-CoV-2.</p>
- <p></p>
- <p>Filtering facepiece respirators (FFPs), on the other hand, provide additional benefit to surgical masks by providing an air-tight seal and containing a mechanical filter, which can remove airborne contaminants through interception. Health and Safety Executive and British Safety Industry Federation recommend fit testing to ensure the respirator is suited to the user's facial structure and therefore performs optimally. There are three categories of FFP in Europe: FFP1, FFP2 (equivalent to N95), and FFP3. Class three (FFP3) provides the highest quality of protection and is the only one approved for UK healthcare settings, especially in AGPs, such as intubation and non-invasive ventilation. They must meet industry-standard regulations including strict industry tests with biological aerosols and cannot exceed 2% leakage. FFP3 masks provide 99% efficiency in filtering particles sized above 100 nm, including small airborne droplets.[22,24]</p>
- <p>Adopts the current versions of the industry standards SAE J639, SAE J1739, and SAE J2844 in the use conditions for the proposed listings of HFO-1234yf in nonroad vehicles and previous listings for certain onroad vehicles.</p>
- <p>EPA is rescinding use conditions that limit human exposure to halocarbon and inert gas agents used in the fire suppression and explosion protection industry. These use conditions are redundant with safety standards established by the National Fire Protection Association (NFPA). In addition, EPA is taking direct final action to change the listing for HBFC-22B1 from acceptable subject to use conditions to unacceptable.</p>
- <p>This notice identifies EPA's decisions of acceptable substitutes for refrigeration, air conditioning, foams, non-aerosol solvent cleaning, and aerosol solvents. This action also requests information on the composition and safety of certain refrigerants for motor vehicle air conditioners. This notice also requests information on whether the SNAP program should include review of and establishment of use conditions for operations that involve manual cleaning with solvents or restriction of non-aerosol solvent substitutes to equipment that meets the cleaning equipment standards in the National Emission Standards for Halogenated Solvent Cleaning. Finally, this action updates readers on the SNAP program's review of n-propyl bromide for use as a substitute for ozone-depleting solvents used in the non-aerosol solvents cleaning, aerosol solvents and propellants, and adhesives, coatings and inks sectors.</p>
- <p><strong>Description</strong><br />EPA is rescinding use conditions that limit human exposure to halocarbon and inert gas agents used in the fire suppression and explosion protection industry. These use conditions are redundant with safety standards established by the National Fire Protection Association (NFPA). In addition, EPA is taking direct final action to change the listing for HBFC-22B1 from acceptable subject to use conditions to unacceptable.</p>
- <p><strong>Description</strong>:<br />This notice identifies EPA's decisions of acceptable substitutes for refrigeration, air conditioning, foams, non-aerosol solvent cleaning, and aerosol solvents. This action also requests information on the composition and safety of certain refrigerants for motor vehicle air conditioners. This notice also requests information on whether the SNAP program should include review of and establishment of use conditions for operations that involve manual cleaning with solvents or restriction of non-aerosol solvent substitutes to equipment that meets the cleaning equipment standards in the National Emission Standards for Halogenated Solvent Cleaning. Finally, this action updates readers on the SNAP program's review of n-propyl bromide for use as a substitute for ozone-depleting solvents used in the non-aerosol solvents cleaning, aerosol solvents and propellants, and adhesives, coatings and inks sectors.</p> aaccfb2cb3<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Disk Drill Pro 3.6.918 Crack Activation Code ((FREE)) Free Download 2019.md DELETED
@@ -1,29 +0,0 @@
- <br />
- <h1>Disk Drill Pro 3.6.918 Crack Activation Code Free Download 2019</h1>
- <p>Disk Drill Pro is a powerful data recovery software for Windows and Mac. It can recover lost files from any type of storage device, including hard drives, USB flash drives, memory cards, and more. Disk Drill Pro also offers data loss prevention features, such as Recovery Vault and Guaranteed Recovery, that can protect your data from accidental deletion or corruption.</p>
- <p>In this article, we will show you how to download and install Disk Drill Pro 3.6.918 Crack Activation Code for free. This is a cracked version of Disk Drill Pro that bypasses the license verification and allows you to use all the features of the software without paying for it. However, we do not recommend using cracked software, as it may contain viruses, malware, or other harmful components that can damage your system or compromise your privacy. Moreover, using cracked software is illegal and unethical, as it violates the terms and conditions of the original software developer.</p>
- <h2>Disk Drill Pro 3.6.918 Crack Activation Code Free Download 2019</h2><br /><p><b><b>DOWNLOAD</b> &#127383; <a href="https://imgfil.com/2uy27H">https://imgfil.com/2uy27H</a></b></p><br /><br />
- <p>If you want to use Disk Drill Pro legally and safely, you should purchase it from the official website[^2^] [^3^] [^4^]. You can also try the free version of Disk Drill Basic[^4^], which allows you to recover up to 500 MB of data for free and preview all the recoverable files before recovery. You can also get a 50% discount if you upgrade from a previous version[^3^], or a 20% discount if you are a student, educator, government employee, or non-profit organization member[^3^].</p>
- <p>However, if you still want to download and install Disk Drill Pro 3.6.918 Crack Activation Code for free, here are the steps you need to follow:</p>
- <ol>
- <li>Download Disk Drill Pro 3.6.918 Crack Activation Code Free Download 2019 from this link[^1^]. This is a zip file that contains the setup file and the crack file.</li>
- <li>Extract the zip file to a folder on your computer.</li>
- <li>Run the setup file and follow the instructions to install Disk Drill Pro on your computer.</li>
- <li>Do not launch Disk Drill Pro after installation.</li>
- <li>Copy the crack file and paste it into the installation folder of Disk Drill Pro. This will replace the original file and activate the software.</li>
- <li>Launch Disk Drill Pro and enjoy all the features for free.</li>
- </ol>
- <p>Note: This method is only for educational purposes. We do not take any responsibility for any damage or loss caused by using cracked software. We strongly advise you to purchase Disk Drill Pro from the official website if you want to use it legally and safely.</p>
-
- <p>Now that you have installed Disk Drill Pro 3.6.918 Crack Activation Code for free, you can use it to recover your lost or deleted data from any storage device. Here are some tips on how to use Disk Drill Pro effectively:</p>
- <ul>
- <li>Before you start a scan, make sure that your storage device is connected to your computer and recognized by Disk Drill Pro. You can see the list of available devices on the left panel of the software.</li>
- <li>Select the device that you want to scan and click on the "Recover" button. Disk Drill Pro will start a quick scan first, which will take a few minutes. If you don't find your files after the quick scan, you can proceed to a deep scan, which will take longer but will find more files.</li>
- <li>After the scan is complete, you can preview the found files by clicking on them. You can also filter the files by type, size, date, or name using the options on the right panel of the software.</li>
- <li>When you find the files that you want to recover, select them and click on the "Recover" button again. You will be asked to choose a location to save the recovered files. Make sure that you don't save them to the same device that you scanned, as this may overwrite the original data and make it unrecoverable.</li>
- <li>Enjoy your recovered files and back them up to a safe location.</li>
- </ul>
- <p>Disk Drill Pro 3.6.918 Crack Activation Code is a powerful data recovery software that can help you recover your lost or deleted data from any storage device. However, using cracked software is risky and illegal, and we do not recommend it. If you want to use Disk Drill Pro legally and safely, you should purchase it from the official website . You can also try the free version of Disk Drill Basic, which allows you to recover up to 500 MB of data for free and preview all the recoverable files before recovery.</p>
- <p></p> d5da3c52bf<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Drive Club Pc Game Download Kickass 61.md DELETED
@@ -1,11 +0,0 @@
- <h2>Drive Club Pc Game Download Kickass 61</h2><br /><p><b><b>Download File</b> &middot;&middot;&middot;&middot;&middot; <a href="https://imgfil.com/2uxZwa">https://imgfil.com/2uxZwa</a></b></p><br /><br />
- <br />
- PLAY MULTIPLAYER IN REAL TIME RIGHT NOW! Jump online to drift and race against live opponents! JOIN THE RACING APP REVOLUTION True next-gen driving...with stunning graphics and realistic racing. You can play real-time multiplayer games on Google Stadia and Windows PC.
- ***
- In this game you will be able to get behind the wheel of the coolest motorcycles that have ever existed and have existed in history. Get ready to experience the thrill of driving like you've never experienced before.
- COMPARE GAMES WITH OTHER MANUFACTURERS
- Google Play:
- https://play.google.com/store/apps/details?id=com.appgift.pumpinbikes 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dirt Rally 2.0 Apk The Best Mods and Add-Ons for More Fun and Challenge.md DELETED
@@ -1,121 +0,0 @@
-
- <h1>DiRT Rally 2.0 APK: How to Download and Play the Best Rally Game on Your Android Device</h1>
- <p>If you are a fan of rally racing, you might have heard of DiRT Rally 2.0, the latest installment in the popular DiRT series by Codemasters. This game is widely regarded as one of the best rally games ever made, with realistic physics, stunning graphics, and immersive gameplay. But did you know that you can also play DiRT Rally 2.0 on your Android device? Yes, you read that right. With DiRT Rally 2.0 APK, you can enjoy this amazing game on your smartphone or tablet, without any hassle or compromise.</p>
- <p>In this article, we will tell you everything you need to know about DiRT Rally 2.0 APK, including what it is, how to download and install it, and some tips and tricks for playing it. So buckle up and get ready for some adrenaline-pumping action.</p>
- <h2>dirt rally 2.0 apk</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot;&middot;&middot; <a href="https://urlin.us/2uSZsC">https://urlin.us/2uSZsC</a></b></p><br /><br />
- <h2>What is DiRT Rally 2.0?</h2>
- <p>DiRT Rally 2.0 is a racing video game that focuses on rally and rallycross disciplines. It was released in February 2019 for Windows, PlayStation 4, and Xbox One, and later for Google Stadia in March 2020. It is the thirteenth game in the Colin McRae Rally series and the eighth game to carry the DiRT name.</p>
- <p>DiRT Rally 2.0 dares you to carve your way through a selection of iconic rally locations from across the globe, in the most powerful off-road vehicles ever made, knowing that the smallest mistake could end your stage. You can compete in six rally locations (Argentina, Australia, New Zealand, Poland, Spain, and USA) and eight rallycross circuits (Abu Dhabi, Barcelona, Hell, Holjes, Latvia, Mettet, Montalegre, and Silverstone), with over 50 cars to choose from.</p>
- <p>DiRT Rally 2.0 also features a career mode, where you can create your own team, hire staff, upgrade your cars, and manage your finances. You can also join online events and challenges, where you can compete with other players from around the world.</p>
- <h3>Features of DiRT Rally 2.0</h3>
- <p>Some of the features that make DiRT Rally 2.0 stand out from other racing games are:</p>
- <ul>
- <li><b>Realistic physics:</b> The game uses a sophisticated physics engine that simulates every aspect of rally driving, such as traction, suspension, weight transfer, tire wear, surface deformation, weather effects, and damage.</li>
- <li><b>Stunning graphics:</b> The game boasts of high-quality graphics that bring the rally locations to life, with dynamic lighting, shadows, reflections, dust, mud, water splashes, and smoke.</li>
- <li><b>Immersive gameplay:</b> The game offers a first-person perspective that puts you in the driver's seat of your car, with a detailed cockpit view and authentic sound effects. You can also use a co-driver's voice to guide you through the stages.</li>
- <li><b>Customization:</b> The game allows you to customize your car's appearance and performance, with various liveries, parts, setups, and tuning options.</li>
- <li><b>Variety:</b> The game offers a variety of modes, cars, locations, events, and challenges to keep you engaged and entertained.</li>
- </ul>
- <h3>Requirements for DiRT Rally 2.0 APK</h3>
- <p>DiRT Rally 2.0 APK is a modified version of the original game that allows you to play it on your Android device. However, not all devices are compatible with this APK. To run DiRT Rally 2.0 APK smoothly, you need to have the following requirements:</p>
- <p>dirt rally 2.0 mod apk download<br />
- dirt rally 2.0 apk obb<br />
- dirt rally 2.0 android apk<br />
- dirt rally 2.0 apk data<br />
- dirt rally 2.0 apk free download<br />
- dirt rally 2.0 apk offline<br />
- dirt rally 2.0 apk rexdl<br />
- dirt rally 2.0 apk revdl<br />
- dirt rally 2.0 apk pure<br />
- dirt rally 2.0 apk hack<br />
- dirt rally 2.0 apk latest version<br />
- dirt rally 2.0 apk full version<br />
- dirt rally 2.0 apk + mod + data<br />
- dirt rally 2.0 apk unlimited money<br />
- dirt rally 2.0 apk + obb download<br />
- dirt rally 2.0 apk + data download<br />
- dirt rally 2.0 mod apk android<br />
- dirt rally 2.0 mod apk obb<br />
- dirt rally 2.0 mod apk free download<br />
- dirt rally 2.0 mod apk offline<br />
- dirt rally 2.0 mod apk rexdl<br />
- dirt rally 2.0 mod apk revdl<br />
- dirt rally 2.0 mod apk pure<br />
- dirt rally 2.0 mod apk hack<br />
- dirt rally 2.0 mod apk latest version<br />
- dirt rally 2.0 mod apk full version<br />
- dirt rally 2.0 mod apk + data<br />
- dirt rally 2.0 mod apk unlimited money<br />
- dirt rally 2.0 android apk download<br />
- dirt rally 2.0 android apk obb<br />
- dirt rally 2.0 android apk free download<br />
- dirt rally 2.0 android apk offline<br />
- dirt rally 2.0 android apk rexdl<br />
- dirt rally 2.0 android apk revdl<br />
- dirt rally 2.0 android apk pure<br />
- dirt rally 2.0 android apk hack<br />
- dirt rally 2.0 android apk latest version<br />
- dirt rally 2.0 android apk full version<br />
- dirt rally 2.0 android apk + data<br />
- dirt rally 2.0 android apk unlimited money<br />
- how to install dirt rally 2.0 apk on android<br />
- how to play dirt rally 2.0 offline on android<br />
- how to download and install dirt rally 2.0 on android for free <br />
- how to get unlimited money in dirt rally 2.0 on android <br />
- how to update dirt rally 2.0 on android <br />
- best settings for dirt rally 2.0 on android <br />
- best cars in dirt rally 2.0 on android <br />
- best tracks in dirt rally 2.0 on android <br />
- best mods for dirt rally 2.0 on android</p>
- <ul>
- <li><b>Android version:</b> You need to have Android 5.0 or higher on your device.</li>
- <li><b>Storage space:</b> You need to have at least 4 GB of free space on your device.</li>
- <li><b>RAM:</b> You need to have at least 2 GB of RAM on your device.</li>
- <li><b>Processor:</b> You need to have a quad-core processor or higher on your device.</li>
- <li><b>Graphics:</b> You need to have a GPU that supports OpenGL ES 3.0 or higher on your device.</li>
- <li><b>Internet connection:</b> You need to have a stable internet connection to download the APK file and the additional data files.</li>
- </ul>
- <p>If you meet these requirements, you can proceed to download and install DiRT Rally 2.0 APK on your device.</p>
- <h2>How to Download and Install DiRT Rally 2.0 APK</h2>
- <p>To download and install DiRT Rally 2.0 APK on your device, you need to follow these steps:</p>
- <h3>Step 1: Download the APK file</h3>
- <p>The first step is to download the APK file of DiRT Rally 2.0 from a reliable source. You can use this link to download the APK file, which is about 40 MB in size. Make sure you download the file from a trusted website, as some websites may contain malware or viruses that can harm your device.</p>
- <h3>Step 2: Enable unknown sources</h3>
- <p>The next step is to enable unknown sources on your device. This is necessary because DiRT Rally 2.0 APK is not available on the Google Play Store, and you need to allow your device to install apps from other sources. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but you can ignore it and proceed.</p>
- <h3>Step 3: Install the APK file</h3>
- <p>The third step is to install the APK file on your device. To do this, locate the downloaded file in your file manager and tap on it. You may see a pop-up window asking for permissions, but you can grant them and continue. The installation process may take a few minutes, depending on your device's performance.</p>
- <h3>Step 4: Launch the game and enjoy</h3>
- <p>The final step is to launch the game and enjoy it. To do this, go to your app drawer and tap on the DiRT Rally 2.0 icon. You may see a loading screen that will download some additional data files, which are about 1 GB in size. This may take some time, depending on your internet speed. Once the download is complete, you can start playing the game and experience the thrill of rally racing.</p>
- <h2>Tips and Tricks for Playing DiRT Rally 2.0 APK</h2>
- <p>DiRT Rally 2.0 APK is not an easy game to master, as it requires skill, concentration, and patience. However, with some tips and tricks, you can improve your performance and enjoy the game more. Here are some tips and tricks for playing DiRT Rally 2.0 APK:</p>
- <h3>Choose the right car and settings</h3>
- <p>The first tip is to choose the right car and settings for each stage and event. Different cars have different strengths and weaknesses, such as speed, handling, acceleration, braking, and durability. You should choose a car that suits your driving style and the terrain of the stage. For example, if you are driving on a gravel road, you may want a car that has good traction and suspension. You should also adjust the settings of your car according to your preference and skill level. You can change things like gear ratio, differential, brake bias, suspension stiffness, ride height, camber angle, anti-roll bar, tire pressure, and more. These settings can affect how your car behaves on the road, so you should experiment with them until you find the optimal setup.</p>
- <h3>Learn the tracks and practice</h3>
- <p>The second tip is to learn the tracks and practice them before competing in an event. Each track has its own characteristics, such as turns, bumps, jumps, hazards, weather conditions, and more. You should familiarize yourself with these features and memorize them as much as possible. You should also practice driving on them, either in the free roam mode or in the time trial mode. This will help you improve your skills, confidence, and timing. You can also watch the replays of your runs or other players' runs to learn from their mistakes and successes.</p>
- <h3>Use the co-driver's calls</h3>
- <p>The third tip is to use the co-driver's calls to guide you through the stages. The co-driver is your navigator who tells you what to expect ahead, such as the direction, distance, and severity of the turns, the road conditions, the hazards, and the landmarks. The co-driver's calls are based on a standardized system of symbols and numbers that you should learn and understand. For example, "Left 3 over crest" means that there is a left turn with a severity of 3 (out of 6) that goes over a crest. You should listen to the co-driver's calls carefully and follow them accordingly. They can help you prepare for the upcoming challenges and avoid crashes. You can also adjust the volume, timing, and language of the co-driver's calls in the settings menu.</p>
- <h3>Adjust the difficulty and assists</h3>
- <p>The fourth tip is to adjust the difficulty and assists of the game according to your skill level and preference. The game offers several options to customize your experience, such as: <ul>
- <li><b>Difficulty level:</b> You can choose from five difficulty levels, ranging from very easy to very hard. The difficulty level affects how fast and aggressive your opponents are, how much time you have to complete a stage, and how much money you earn.</li>
- <li><b>Assists:</b> You can enable or disable various assists that can help you control your car, such as traction control, stability control, anti-lock brakes, automatic gearbox, launch control, hill start assist, and more. The assists can make the game easier or more realistic, depending on your preference.</li>
101
- <li><b>Camera view:</b> You can choose from several camera views that can affect your visibility and immersion, such as cockpit view, hood view, bumper view, chase view, helicopter view, and more.</li>
102
- <li><b>HUD:</b> You can customize the heads-up display that shows you information such as speedometer, rev counter, gear indicator, damage indicator, timer, map, co-driver's calls, and more. You can turn on or off any of these elements or change their position and size.</li>
103
- </ul>
104
- You can experiment with these options until you find the best combination for you.</p>
105
- <h2>Conclusion</h2>
106
- <p>DiRT Rally 2.0 APK is a great way to enjoy one of the best rally games ever made on your Android device. It offers realistic physics, stunning graphics, immersive gameplay, customization options, variety of modes, cars, locations, events, and challenges. It is not an easy game to master, but with some tips and tricks, you can improve your performance and have fun. To play DiRT Rally 2.0 APK on your device, you need to meet some requirements, download and install the APK file from a reliable source, and enable unknown sources on your device. You can then launch the game and enjoy it. We hope this article has helped you learn more about DiRT Rally 2.0 APK and how to play it on your Android device. If you have any questions or feedback, feel free to leave a comment below. Happy racing! <h2>FAQs</h2>
107
- <p>Here are some frequently asked questions about DiRT Rally 2.0 APK:</p>
108
- <ol>
109
- <li><b>Is DiRT Rally 2.0 APK safe to download and install?</b></li>
110
- <p>Yes, DiRT Rally 2.0 APK is safe to download and install, as long as you get it from a reliable source. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain malware or viruses that can harm your device. You should also scan the APK file with an antivirus app before installing it.</p>
111
- <li><b>Is DiRT Rally 2.0 APK free to play?</b></li>
112
- <p>Yes, DiRT Rally 2.0 APK is free to play, as you do not need to pay anything to download and install it. However, the game may contain some in-app purchases or ads that can enhance your experience or support the developers.</p>
113
- <li><b>Can I play DiRT Rally 2.0 APK offline?</b></li>
114
- <p>No, DiRT Rally 2.0 APK requires an internet connection to download the additional data files and to access some of the online features, such as events and challenges. You can play the game offline only after you have downloaded all the data files and completed the initial setup.</p>
115
- <li><b>Can I play DiRT Rally 2.0 APK with a controller?</b></li>
116
- <p>Yes, DiRT Rally 2.0 APK supports various controllers that can connect to your Android device via Bluetooth or USB. You can use a controller to control your car and navigate the menus, as well as customize the button layout and sensitivity in the settings menu.</p>
117
- <li><b>Can I play DiRT Rally 2.0 APK with friends?</b></li>
118
- <p>Yes, DiRT Rally 2.0 APK allows you to play with friends online or locally. You can join online events and challenges, where you can compete with other players from around the world. You can also create or join a club, where you can invite your friends and share your progress and results. Alternatively, you can play locally with up to four players on the same device, using a split-screen mode.</p>
119
- </ol></p>
- <br />
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_heun_discrete.py DELETED
@@ -1,254 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from typing import List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import paddle
20
-
21
- from ..configuration_utils import ConfigMixin, register_to_config
22
- from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS
23
- from .scheduling_utils import SchedulerMixin, SchedulerOutput
24
-
25
-
26
- class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
27
- """
28
- Implements Algorithm 2 (Heun steps) from Karras et al. (2022) for discrete beta schedules. Based on the original
29
- k-diffusion implementation by Katherine Crowson:
30
- https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L90
31
-
32
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
33
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
34
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
35
- [`~SchedulerMixin.from_pretrained`] functions.
36
-
37
- Args:
38
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
39
- beta_start (`float`): the starting `beta` value of inference.
40
- beta_end (`float`): the final `beta` value.
41
- beta_schedule (`str`):
42
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
43
- `linear` or `scaled_linear`.
44
- trained_betas (`np.ndarray`, optional):
45
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
46
- prediction_type (`str`, default `epsilon`, optional):
47
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
48
- process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
49
- https://imagen.research.google/video/paper.pdf)
50
- """
51
-
52
- _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
53
- order = 2
54
-
55
- @register_to_config
56
- def __init__(
57
- self,
58
- num_train_timesteps: int = 1000,
59
- beta_start: float = 0.00085, # sensible defaults
60
- beta_end: float = 0.012,
61
- beta_schedule: str = "linear",
62
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
63
- prediction_type: str = "epsilon",
64
- ):
65
- if trained_betas is not None:
66
- self.betas = paddle.to_tensor(trained_betas, dtype="float32")
67
- elif beta_schedule == "linear":
68
- self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
69
- elif beta_schedule == "scaled_linear":
70
- # this schedule is very specific to the latent diffusion model.
71
- self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
72
- else:
73
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
74
-
75
- self.alphas = 1.0 - self.betas
76
- self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
77
-
78
- # set all values
79
- self.set_timesteps(num_train_timesteps, num_train_timesteps)
80
-
81
- def index_for_timestep(self, timestep):
82
- indices = (self.timesteps == timestep).nonzero()
83
- if self.state_in_first_order:
84
- pos = -1
85
- else:
86
- pos = 0
87
- return indices[pos].item()
88
-
89
- def scale_model_input(
90
- self,
91
- sample: paddle.Tensor,
92
- timestep: Union[float, paddle.Tensor],
93
- ) -> paddle.Tensor:
94
- """
95
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
- current timestep.
-
- Args:
- sample (`paddle.Tensor`): input sample
- timestep (`int`, optional): current timestep
-
- Returns:
- `paddle.Tensor`: scaled input sample
103
- """
104
- step_index = self.index_for_timestep(timestep)
105
-
106
- sigma = self.sigmas[step_index]
107
- sample = sample / ((sigma**2 + 1) ** 0.5)
108
- return sample
109
-
110
- def set_timesteps(
111
- self,
112
- num_inference_steps: int,
113
- num_train_timesteps: Optional[int] = None,
114
- ):
115
- """
116
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
117
-
118
- Args:
119
- num_inference_steps (`int`):
120
- the number of diffusion steps used when generating samples with a pre-trained model.
121
- num_train_timesteps (`int`, Optional): number of diffusion steps used to train the model.
122
- """
123
- self.num_inference_steps = num_inference_steps
124
-
125
- num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
126
-
127
- timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
128
-
129
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
130
- sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
131
- sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
132
- sigmas = paddle.to_tensor(sigmas)
133
- self.sigmas = paddle.concat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
134
-
135
- # standard deviation of the initial noise distribution
136
- self.init_noise_sigma = self.sigmas.max()
137
-
138
- timesteps = paddle.to_tensor(timesteps)
139
- timesteps = paddle.concat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
140
-
141
- self.timesteps = timesteps
142
-
143
- # empty dt and derivative
144
- self.prev_derivative = None
145
- self.dt = None
146
-
147
- @property
148
- def state_in_first_order(self):
149
- return self.dt is None
150
-
151
- def step(
152
- self,
153
- model_output: Union[paddle.Tensor, np.ndarray],
154
- timestep: Union[float, paddle.Tensor],
155
- sample: Union[paddle.Tensor, np.ndarray],
156
- return_dict: bool = True,
157
- ) -> Union[SchedulerOutput, Tuple]:
158
- """
159
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
- process from the learned model outputs (most often the predicted noise).
-
- Args:
- model_output (`paddle.Tensor` or `np.ndarray`): direct output from the learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`paddle.Tensor` or `np.ndarray`): current instance of the sample being created by the diffusion process.
- return_dict (`bool`): option for returning a tuple rather than a SchedulerOutput class.
-
- Returns:
- [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
- [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
- returning a tuple, the first element is the sample tensor.
172
- """
173
- step_index = self.index_for_timestep(timestep)
174
-
175
- if self.state_in_first_order:
176
- sigma = self.sigmas[step_index]
177
- sigma_next = self.sigmas[step_index + 1]
178
- else:
179
- # 2nd order / Heun's method
180
- sigma = self.sigmas[step_index - 1]
181
- sigma_next = self.sigmas[step_index]
182
-
183
- # currently only gamma=0 is supported. This usually works best anyways.
184
- # We can support gamma in the future but then need to scale the timestep before
185
- # passing it to the model which requires a change in API
186
- gamma = 0
187
- sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
188
-
189
- # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
190
- if self.config.prediction_type == "epsilon":
191
- sigma_input = sigma_hat if self.state_in_first_order else sigma_next
192
- pred_original_sample = sample - sigma_input * model_output
193
- elif self.config.prediction_type == "v_prediction":
194
- sigma_input = sigma_hat if self.state_in_first_order else sigma_next
195
- pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
196
- sample / (sigma_input**2 + 1)
197
- )
198
- else:
199
- raise ValueError(
200
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
201
- )
202
-
203
- if self.state_in_first_order:
204
- # 2. Convert to an ODE derivative for 1st order
205
- derivative = (sample - pred_original_sample) / sigma_hat
206
- # 3. delta timestep
207
- dt = sigma_next - sigma_hat
208
-
209
- # store for 2nd order step
210
- self.prev_derivative = derivative
211
- self.dt = dt
212
- self.sample = sample
213
- else:
214
- # 2. 2nd order / Heun's method
215
- derivative = (sample - pred_original_sample) / sigma_hat
216
- derivative = (self.prev_derivative + derivative) / 2
217
-
218
- # 3. take prev timestep & sample
219
- dt = self.dt
220
- sample = self.sample
221
-
222
- # free dt and derivative
223
- # Note, this puts the scheduler in "first order mode"
224
- self.prev_derivative = None
225
- self.dt = None
226
- self.sample = None
227
-
228
- prev_sample = sample + derivative * dt
229
-
230
- if not return_dict:
231
- return (prev_sample,)
232
-
233
- return SchedulerOutput(prev_sample=prev_sample)
234
-
235
- def add_noise(
236
- self,
237
- original_samples: paddle.Tensor,
238
- noise: paddle.Tensor,
239
- timesteps: paddle.Tensor,
240
- ) -> paddle.Tensor:
241
- # Make sure sigmas and timesteps have the same dtype as original_samples
242
- self.sigmas = self.sigmas.cast(original_samples.dtype)
243
-
244
- step_indices = [self.index_for_timestep(t) for t in timesteps]
245
-
246
- sigma = self.sigmas[step_indices].flatten()
247
- while len(sigma.shape) < len(original_samples.shape):
248
- sigma = sigma.unsqueeze(-1)
249
-
250
- noisy_samples = original_samples + noise * sigma
251
- return noisy_samples
252
-
253
- def __len__(self):
254
- return self.config.num_train_timesteps
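For orientation, the second-order update that `HeunDiscreteScheduler.step` implements can be sketched in plain NumPy; the `denoise` callable and the sigma grid below are illustrative placeholders, not part of the ppdiffusers API.

# Conceptual NumPy sketch of Algorithm 2 (Heun) from Karras et al. (2022),
# the update performed by HeunDiscreteScheduler.step() above.
# `denoise(x, sigma)` stands in for a trained model returning the predicted x_0.
import numpy as np

def heun_sample(denoise, x, sigmas):
    # integrate the probability-flow ODE from sigmas[0] down to sigmas[-1]
    for sigma, sigma_next in zip(sigmas[:-1], sigmas[1:]):
        d = (x - denoise(x, sigma)) / sigma        # ODE derivative at sigma
        dt = sigma_next - sigma
        x_1 = x + d * dt                           # 1st order (Euler) proposal
        if sigma_next == 0:
            x = x_1                                # final step stays first order
        else:
            d_next = (x_1 - denoise(x_1, sigma_next)) / sigma_next
            x = x + 0.5 * (d + d_next) * dt        # 2nd order (Heun) correction
    return x

# toy usage with a dummy denoiser that just shrinks its input
sigmas = np.array([14.6, 7.0, 3.0, 1.0, 0.0])
x = np.random.randn(4) * sigmas[0]
print(heun_sample(lambda x_in, s: 0.9 * x_in, x, sigmas))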
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_vq_diffusion.py DELETED
@@ -1,496 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 Microsoft and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from dataclasses import dataclass
17
- from typing import List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import paddle
21
- import paddle.nn.functional as F
22
-
23
- from ..configuration_utils import ConfigMixin, register_to_config
24
- from ..utils import BaseOutput
25
- from .scheduling_utils import SchedulerMixin
26
-
27
-
28
- def logaddexp(a, b):
29
- return paddle.log(a.exp() + b.exp())
30
-
31
-
32
- # (TODO junnyu) paddle logsumexp may have a bug
33
- def logsumexp(x, axis=None, keepdim=False):
34
- return paddle.log(x.exp().sum(axis=axis, keepdim=keepdim))
35
-
36
-
37
- @dataclass
38
- class VQDiffusionSchedulerOutput(BaseOutput):
39
- """
40
- Output class for the scheduler's step function output.
41
-
42
- Args:
43
- prev_sample (`paddle.Tensor` of shape `(batch size, num latent pixels)`):
44
- Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the
45
- denoising loop.
46
- """
47
-
48
- prev_sample: paddle.Tensor
49
-
50
-
51
- def index_to_log_onehot(x: paddle.Tensor, num_classes: int) -> paddle.Tensor:
52
- """
53
- Convert batch of vector of class indices into batch of log onehot vectors
54
-
55
- Args:
56
- x (`paddle.Tensor` of shape `(batch size, vector length)`):
57
- Batch of class indices
58
-
59
- num_classes (`int`):
60
- number of classes to be used for the onehot vectors
61
-
62
- Returns:
63
- `paddle.Tensor` of shape `(batch size, num classes, vector length)`:
64
- Log onehot vectors
65
- """
66
- x_onehot = F.one_hot(x, num_classes)
67
- x_onehot = x_onehot.transpose([0, 2, 1])
68
- log_x = paddle.log(x_onehot.cast("float32").clip(min=1e-30))
69
- return log_x
70
-
71
-
72
- def gumbel_noised(logits: paddle.Tensor, generator: Optional[paddle.Generator]) -> paddle.Tensor:
73
- """
74
- Apply gumbel noise to `logits`
75
- """
76
- uniform = paddle.rand(logits.shape, generator=generator)
77
- gumbel_noise = -paddle.log(-paddle.log(uniform + 1e-30) + 1e-30)
78
- noised = gumbel_noise + logits
79
- return noised
80
-
81
-
82
- def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009):
83
- """
84
- Cumulative and non-cumulative alpha schedules.
85
-
86
- See section 4.1.
87
- """
88
- att = (
89
- np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start)
90
- + alpha_cum_start
91
- )
92
- att = np.concatenate(([1], att))
93
- at = att[1:] / att[:-1]
94
- att = np.concatenate((att[1:], [1]))
95
- return at, att
96
-
97
-
98
- def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999):
99
- """
100
- Cumulative and non-cumulative gamma schedules.
101
-
102
- See section 4.1.
103
- """
104
- ctt = (
105
- np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start)
106
- + gamma_cum_start
107
- )
108
- ctt = np.concatenate(([0], ctt))
109
- one_minus_ctt = 1 - ctt
110
- one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1]
111
- ct = 1 - one_minus_ct
112
- ctt = np.concatenate((ctt[1:], [0]))
113
- return ct, ctt
114
-
115
-
116
- class VQDiffusionScheduler(SchedulerMixin, ConfigMixin):
117
- """
118
- The VQ-diffusion transformer outputs predicted probabilities of the initial unnoised image.
119
-
120
- The VQ-diffusion scheduler converts the transformer's output into a sample for the unnoised image at the previous
121
- diffusion timestep.
122
-
123
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
124
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
125
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
126
- [`~SchedulerMixin.from_pretrained`] functions.
127
-
128
- For more details, see the original paper: https://arxiv.org/abs/2111.14822
129
-
130
- Args:
131
- num_vec_classes (`int`):
132
- The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked
133
- latent pixel.
134
-
135
- num_train_timesteps (`int`):
136
- Number of diffusion steps used to train the model.
137
-
138
- alpha_cum_start (`float`):
139
- The starting cumulative alpha value.
140
-
141
- alpha_cum_end (`float`):
142
- The ending cumulative alpha value.
143
-
144
- gamma_cum_start (`float`):
145
- The starting cumulative gamma value.
146
-
147
- gamma_cum_end (`float`):
148
- The ending cumulative gamma value.
149
- """
150
-
151
- order = 1
152
-
153
- @register_to_config
154
- def __init__(
155
- self,
156
- num_vec_classes: int,
157
- num_train_timesteps: int = 100,
158
- alpha_cum_start: float = 0.99999,
159
- alpha_cum_end: float = 0.000009,
160
- gamma_cum_start: float = 0.000009,
161
- gamma_cum_end: float = 0.99999,
162
- ):
163
- self.num_embed = num_vec_classes
164
-
165
- # By convention, the index for the mask class is the last class index
166
- self.mask_class = self.num_embed - 1
167
-
168
- at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end)
169
- ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end)
170
-
171
- num_non_mask_classes = self.num_embed - 1
172
- bt = (1 - at - ct) / num_non_mask_classes
173
- btt = (1 - att - ctt) / num_non_mask_classes
174
-
175
- at = paddle.to_tensor(at.astype("float64"))
176
- bt = paddle.to_tensor(bt.astype("float64"))
177
- ct = paddle.to_tensor(ct.astype("float64"))
178
- log_at = paddle.log(at)
179
- log_bt = paddle.log(bt)
180
- log_ct = paddle.log(ct)
181
-
182
- att = paddle.to_tensor(att.astype("float64"))
183
- btt = paddle.to_tensor(btt.astype("float64"))
184
- ctt = paddle.to_tensor(ctt.astype("float64"))
185
- log_cumprod_at = paddle.log(att)
186
- log_cumprod_bt = paddle.log(btt)
187
- log_cumprod_ct = paddle.log(ctt)
188
-
189
- self.log_at = log_at.cast("float32")
190
- self.log_bt = log_bt.cast("float32")
191
- self.log_ct = log_ct.cast("float32")
192
- self.log_cumprod_at = log_cumprod_at.cast("float32")
193
- self.log_cumprod_bt = log_cumprod_bt.cast("float32")
194
- self.log_cumprod_ct = log_cumprod_ct.cast("float32")
195
-
196
- # setable values
197
- self.num_inference_steps = None
198
- self.timesteps = paddle.to_tensor(np.arange(0, num_train_timesteps)[::-1].copy())
199
-
200
- def set_timesteps(self, num_inference_steps: int):
201
- """
202
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
203
-
204
- Args:
205
- num_inference_steps (`int`):
206
- the number of diffusion steps used when generating samples with a pre-trained model.
207
- """
208
- self.num_inference_steps = num_inference_steps
209
- timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
210
- self.timesteps = paddle.to_tensor(timesteps)
211
-
212
- def step(
213
- self,
214
- model_output: paddle.Tensor,
215
- timestep: paddle.Tensor,
216
- sample: paddle.Tensor,
217
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
218
- return_dict: bool = True,
219
- ) -> Union[VQDiffusionSchedulerOutput, Tuple]:
220
- """
221
- Predict the sample at the previous timestep via the reverse transition distribution i.e. Equation (11). See the
222
- docstring for `self.q_posterior` for more in depth docs on how Equation (11) is computed.
223
-
224
- Args:
225
- model_output (`paddle.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`):
- The log probabilities for the predicted classes of the initial latent pixels. Does not include a
- prediction for the masked class as the initial unnoised image cannot be masked.
-
- timestep (`paddle.Tensor`):
- The timestep that determines which transition matrices are used.
-
- sample (`paddle.Tensor` of shape `(batch size, num latent pixels)`):
- The classes of each latent pixel at time `t`
234
-
235
- generator: (`paddle.Generator` or None):
236
- RNG for the noise applied to p(x_{t-1} | x_t) before it is sampled from.
237
-
238
- return_dict (`bool`):
239
- option for returning tuple rather than VQDiffusionSchedulerOutput class
240
-
241
- Returns:
242
- [`~schedulers.scheduling_utils.VQDiffusionSchedulerOutput`] or `tuple`:
243
- [`~schedulers.scheduling_utils.VQDiffusionSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
244
- When returning a tuple, the first element is the sample tensor.
245
- """
246
- if timestep == 0:
247
- log_p_x_t_min_1 = model_output
248
- else:
249
- log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep)
250
-
251
- log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator)
252
-
253
- x_t_min_1 = log_p_x_t_min_1.argmax(axis=1)
254
-
255
- if not return_dict:
256
- return (x_t_min_1,)
257
-
258
- return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1)
259
-
260
- def q_posterior(self, log_p_x_0, x_t, t):
261
- """
262
- Calculates the log probabilities for the predicted classes of the image at timestep `t-1`. I.e. Equation (11).
263
-
264
- Instead of directly computing equation (11), we use Equation (5) to restate Equation (11) in terms of only
265
- forward probabilities.
266
-
267
- Equation (11) stated in terms of forward probabilities via Equation (5):
268
-
269
- Where:
270
- - the sum is over x_0 = {C_0 ... C_{k-1}} (classes for x_0)
271
-
272
- p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) )
273
-
274
- Args:
275
- log_p_x_0: (`paddle.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`):
276
- The log probabilities for the predicted classes of the initial latent pixels. Does not include a
277
- prediction for the masked class as the initial unnoised image cannot be masked.
278
-
279
- x_t: (`paddle.Tensor` of shape `(batch size, num latent pixels)`):
280
- The classes of each latent pixel at time `t`
281
-
282
- t (paddle.Tensor):
283
- The timestep that determines which transition matrix is used.
284
-
285
- Returns:
286
- `paddle.Tensor` of shape `(batch size, num classes, num latent pixels)`:
287
- The log probabilities for the predicted classes of the image at timestep `t-1`. I.e. Equation (11).
288
- """
289
- log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed)
290
-
291
- log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class(
292
- t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True
293
- )
294
-
295
- log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class(
296
- t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False
297
- )
298
-
299
- # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0)
300
- # . . .
301
- # . . .
302
- # . . .
303
- # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
304
- q = log_p_x_0 - log_q_x_t_given_x_0
305
-
306
- # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... ,
307
- # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
308
- q_log_sum_exp = logsumexp(q, axis=1, keepdim=True)
309
-
310
- # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0 ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n
311
- # . . .
312
- # . . .
313
- # . . .
314
- # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0 ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n
315
- q = q - q_log_sum_exp
316
-
317
- # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
318
- # . . .
319
- # . . .
320
- # . . .
321
- # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
322
- # c_cumulative_{t-1} ... c_cumulative_{t-1}
323
- q = self.apply_cumulative_transitions(q, t - 1)
324
-
325
- # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n
326
- # . . .
327
- # . . .
328
- # . . .
329
- # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n
330
- # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0
331
- log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp
332
-
333
- # For each column, there are two possible cases.
334
- #
335
- # Where:
336
- # - sum(p_n(x_0))) is summing over all classes for x_0
337
- # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's)
338
- # - C_j is the class transitioning to
339
- #
340
- # 1. x_t is masked i.e. x_t = c_k
341
- #
342
- # Simplifying the expression, the column vector is:
343
- # .
344
- # .
345
- # .
346
- # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0)))
347
- # .
348
- # .
349
- # .
350
- # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0))
351
- #
352
- # From equation (11) stated in terms of forward probabilities, the last row is trivially verified.
353
- #
354
- # For the other rows, we can state the equation as ...
355
- #
356
- # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})]
357
- #
358
- # This verifies the other rows.
359
- #
360
- # 2. x_t is not masked
361
- #
362
- # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i:
363
- # .
364
- # .
365
- # .
366
- # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1}))
367
- # .
368
- # .
369
- # .
370
- # C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1}))
371
- # .
372
- # .
373
- # .
374
- # 0
375
- #
376
- # The last row is trivially verified. The other rows can be verified by directly expanding equation (11) stated in terms of forward probabilities.
377
- return log_p_x_t_min_1
378
-
379
- def log_Q_t_transitioning_to_known_class(
380
- self, *, t: paddle.Tensor, x_t: paddle.Tensor, log_onehot_x_t: paddle.Tensor, cumulative: bool
381
- ):
382
- """
383
- Returns the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each
384
- latent pixel in `x_t`.
385
-
386
- See equation (7) for the complete non-cumulative transition matrix. The complete cumulative transition matrix
387
- is the same structure except the parameters (alpha, beta, gamma) are the cumulative analogs.
388
-
389
- Args:
390
- t (paddle.Tensor):
391
- The timestep that determines which transition matrix is used.
392
-
393
- x_t (`paddle.Tensor` of shape `(batch size, num latent pixels)`):
394
- The classes of each latent pixel at time `t`.
395
-
396
- log_onehot_x_t (`paddle.Tensor` of shape `(batch size, num classes, num latent pixels)`):
397
- The log one-hot vectors of `x_t`
398
-
399
- cumulative (`bool`):
400
- If cumulative is `False`, we use the single step transition matrix `t-1`->`t`. If cumulative is `True`,
401
- we use the cumulative transition matrix `0`->`t`.
402
-
403
- Returns:
404
- `paddle.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`:
405
- Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability
406
- transition matrix.
407
-
408
- When non cumulative, returns `self.num_classes - 1` rows because the initial latent pixel cannot be
409
- masked.
410
-
411
- Where:
412
- - `q_n` is the probability distribution for the forward process of the `n`th latent pixel.
413
- - C_0 is a class of a latent pixel embedding
414
- - C_k is the class of the masked latent pixel
415
-
416
- non-cumulative result (omitting logarithms):
417
- ```
418
- q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0)
419
- . . .
420
- . . .
421
- . . .
422
- q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k)
423
- ```
424
-
425
- cumulative result (omitting logarithms):
426
- ```
427
- q_0_cumulative(x_t | x_0 = C_0) ... q_n_cumulative(x_t | x_0 = C_0)
428
- . . .
429
- . . .
430
- . . .
431
- q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1})
432
- ```
433
- """
434
- if cumulative:
435
- a = self.log_cumprod_at[t]
436
- b = self.log_cumprod_bt[t]
437
- c = self.log_cumprod_ct[t]
438
- else:
439
- a = self.log_at[t]
440
- b = self.log_bt[t]
441
- c = self.log_ct[t]
442
-
443
- if not cumulative:
444
- # The values in the onehot vector can also be used as the logprobs for transitioning
445
- # from masked latent pixels. If we are not calculating the cumulative transitions,
446
- # we need to save these vectors to be re-appended to the final matrix so the values
447
- # aren't overwritten.
448
- #
449
- # `P(x_t!=mask|x_{t-1=mask}) = 0` and 0 will be the value of the last row of the onehot vector
450
- # if x_t is not masked
451
- #
452
- # `P(x_t=mask|x_{t-1=mask}) = 1` and 1 will be the value of the last row of the onehot vector
453
- # if x_t is masked
454
- log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1)
455
-
456
- # `index_to_log_onehot` will add onehot vectors for masked pixels,
457
- # so the default one hot matrix has one too many rows. See the doc string
458
- # for an explanation of the dimensionality of the returned matrix.
459
- log_onehot_x_t = log_onehot_x_t[:, :-1, :]
460
-
461
- # this is a cheeky trick to produce the transition probabilities using log one-hot vectors.
462
- #
463
- # Don't worry about what values this sets in the columns that mark transitions
464
- # to masked latent pixels. They are overwritten later with the `mask_class_mask`.
465
- #
466
- # Looking at the below logspace formula in non-logspace, each value will evaluate to either
467
- # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column
468
- # or
469
- # `0 * a + b = b` where `log_Q_t` has the 0 values in the column.
470
- #
471
- # See equation 7 for more details.
472
- log_Q_t = logaddexp(log_onehot_x_t + a, b)
473
-
474
- # The whole column of each masked pixel is `c`
475
- mask_class_mask = x_t == self.mask_class
476
- mask_class_mask = mask_class_mask.unsqueeze(1).expand([-1, self.num_embed - 1, -1])
477
- log_Q_t[mask_class_mask] = c
478
-
479
- if not cumulative:
480
- log_Q_t = paddle.concat((log_Q_t, log_onehot_x_t_transitioning_from_masked), axis=1)
481
-
482
- return log_Q_t
483
-
484
- def apply_cumulative_transitions(self, q, t):
485
- bsz = q.shape[0]
486
- a = self.log_cumprod_at[t]
487
- b = self.log_cumprod_bt[t]
488
- c = self.log_cumprod_ct[t]
489
-
490
- num_latent_pixels = q.shape[2]
491
- c = c.expand([bsz, 1, num_latent_pixels])
492
-
493
- q = logaddexp(q + a, b)
494
- q = paddle.concat((q, c), axis=1)
495
-
496
- return q
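The categorical sampling in `step` above uses the Gumbel-max trick implemented by `gumbel_noised`: adding Gumbel noise to log-probabilities and taking the argmax draws a sample from the categorical distribution they define. A minimal NumPy illustration, with an arbitrary three-class distribution:

# Minimal NumPy illustration of the Gumbel-max trick used by gumbel_noised() above.
import numpy as np

rng = np.random.default_rng(0)
log_p = np.log(np.array([0.7, 0.2, 0.1]))      # per-class log-probabilities

def gumbel_argmax(log_p, n_samples):
    u = rng.random((n_samples, log_p.size))
    g = -np.log(-np.log(u + 1e-30) + 1e-30)    # Gumbel(0, 1) noise, as in the scheduler
    return np.argmax(log_p + g, axis=1)

draws = gumbel_argmax(log_p, 100_000)
print(np.bincount(draws) / draws.size)         # approximately [0.7, 0.2, 0.1]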
 
spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker.py DELETED
@@ -1,40 +0,0 @@
1
- from speaker_encoder.data_objects.random_cycler import RandomCycler
2
- from speaker_encoder.data_objects.utterance import Utterance
3
- from pathlib import Path
4
-
5
- # Contains the set of utterances of a single speaker
6
- class Speaker:
7
- def __init__(self, root: Path):
8
- self.root = root
9
- self.name = root.name
10
- self.utterances = None
11
- self.utterance_cycler = None
12
-
13
- def _load_utterances(self):
14
- with self.root.joinpath("_sources.txt").open("r") as sources_file:
15
- sources = [l.split(",") for l in sources_file]
16
- sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources}
17
- self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()]
18
- self.utterance_cycler = RandomCycler(self.utterances)
19
-
20
- def random_partial(self, count, n_frames):
21
- """
22
- Samples a batch of <count> unique partial utterances from the disk in a way that all
23
- utterances come up at least once every two cycles and in a random order every time.
24
-
25
- :param count: The number of partial utterances to sample from the set of utterances from
26
- that speaker. Utterances are guaranteed not to be repeated if <count> is not larger than
27
- the number of utterances available.
28
- :param n_frames: The number of frames in the partial utterance.
29
- :return: A list of tuples (utterance, frames, range) where utterance is an Utterance,
30
- frames are the frames of the partial utterances and range is the range of the partial
31
- utterance with regard to the complete utterance.
32
- """
33
- if self.utterances is None:
34
- self._load_utterances()
35
-
36
- utterances = self.utterance_cycler.sample(count)
37
-
38
- a = [(u,) + u.random_partial(n_frames) for u in utterances]
39
-
40
- return a
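For reference, `_load_utterances` expects `_sources.txt` to hold one `frames_fname,wave_fpath` pair per line; a small sketch of that parsing with made-up file names (newlines are stripped here for clarity):

# Sketch of the "_sources.txt" layout parsed by Speaker._load_utterances():
# one "frames_fname,wave_fpath" pair per line (paths below are illustrative).
from io import StringIO

sources_file = StringIO(
    "utt_000.npy,/data/speaker_00/utt_000.wav\n"
    "utt_001.npy,/data/speaker_00/utt_001.wav\n"
)
sources = [line.strip().split(",") for line in sources_file]
sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources}
print(sources)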
 
spaces/4th3n4/TraDeX/app.py DELETED
@@ -1,590 +0,0 @@
1
- # %%
2
- # Import section
3
- # (Please don't edit this section unless if necessary)
4
- import copy
5
- from pathlib import Path
6
- import warnings
7
- import holidays
8
- import seaborn as sns
9
- import matplotlib
10
- import matplotlib.dates as mdates
11
- import matplotlib.pyplot as plt
12
- plt.style.use('fivethirtyeight')
13
- import numpy as np
14
- import pandas as pd
15
- import glob
16
- import csv
17
- import lightning.pytorch as pl
18
- from lightning.pytorch.callbacks import EarlyStopping, LearningRateMonitor
19
- from lightning.pytorch.loggers import TensorBoardLogger
20
- import torch
21
- from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
22
- from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
23
- from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
24
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
25
- import random
26
- import gc
27
- import tensorflow as tf
28
- import tensorboard as tb
29
- tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
30
- import os
31
- import math
32
- import sys
33
- from sklearn.model_selection import train_test_split
34
- from sklearn.preprocessing import MinMaxScaler
35
- import tensorflow as tf
36
- from tensorflow.keras.layers import Conv1D, LSTM, Dense, Dropout, Bidirectional, TimeDistributed
37
- from tensorflow.keras.layers import MaxPooling1D, Flatten
38
- from tensorflow.keras.regularizers import L1, L2
39
- from tensorflow.keras.metrics import Accuracy
40
- from tensorflow.keras.metrics import RootMeanSquaredError
41
- from sklearn.metrics import mean_squared_error as MSE
42
- from sklearn.model_selection import KFold
43
- from sklearn.inspection import permutation_importance
44
- from tensorflow.keras.utils import plot_model
45
- from sklearn.metrics import explained_variance_score, mean_poisson_deviance, mean_gamma_deviance, mean_squared_error, mean_squared_log_error, d2_absolute_error_score, d2_pinball_score, d2_tweedie_score
46
- from sklearn.metrics import r2_score
47
- from sklearn.metrics import max_error
48
- import datetime
49
- from datetime import date
50
- import optuna
51
- from tensorflow.keras.callbacks import Callback
52
- from optuna.integration import TFKerasPruningCallback
53
- import shutil
54
- import gradio as gr
55
-
56
- # Some variables (don't edit these variables unless if necessary)
57
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
58
- random.seed(30)
59
- np.random.seed(30)
60
- tf.random.set_seed(30)
61
- torch.manual_seed(30)
62
- torch.cuda.manual_seed(30)
63
-
64
- # Global variables
65
- PATIENCE = 30
66
- MAX_EPOCHS = 3
67
- LEARNING_RATE = 0.01
68
- OPTUNA = True
69
- ACCELERATOR = "cpu"
70
- # This below line is only for GPU. Don't use it for CPU
71
- #os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"
72
-
73
- # Variables to count the number of files
74
- w = 7
75
- prax = [0 for x in range(w)]
76
-
77
- # %%
78
- # Function to train the model (TFT)
79
- def modelTFT(csv_file, prax):
80
- train = csv_file
81
- #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
82
- train['date'] = pd.to_datetime(train['Date/Time'])
83
- #test['date'] = pd.to_datetime(test['Date'])
84
-
85
- data = pd.concat([train], axis = 0, ignore_index=True)
86
- # Check that key is country-store-product-date combination
87
- #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
88
- # Check that there is one date per country-store-product combination
89
- #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()
90
-
91
- #display(train.sample(4))
92
-
93
- # Add a time_idx (an sequence of consecutive integers that goes from min to max date)
94
-
95
- data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
96
- .rename_axis('time_idx')).reset_index(), on = ['Date/Time']))
97
- # add additional features
98
- data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category") # categories have be strings
99
- data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category") # categories have be strings
100
- data["month"] = data['date'].dt.month.astype(str).astype("category") # categories have be strings
101
- #data["log_num_sold"] = np.log(data.num_sold + 1e-8)
102
- #data["avg_volume_by_country"] = data.groupby(["time_idx", "country"], observed=True).num_sold.transform("mean")
103
- #data["avg_volume_by_store"] = data.groupby(["time_idx", "store"], observed=True).num_sold.transform("mean")
104
- #data["avg_volume_by_product"] = data.groupby(["time_idx", "product"], observed=True).num_sold.transform("mean")
105
-
106
- #unique_dates_country = data[['date', 'Ticker']].drop_duplicates(ignore_index = True)
107
- #unique_dates_country['is_holiday'] = (unique_dates_country
108
- # .apply(lambda x: x.date in holidays.country_holidays(x.country), axis = 1).astype('category'))
109
- #unique_dates_country['is_holiday_lead_1'] = (unique_dates_country
110
- # .apply(lambda x: x.date+pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
111
- #unique_dates_country['is_holiday_lead_2'] = (unique_dates_country
112
- # .apply(lambda x: x.date+pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
113
- #unique_dates_country['is_holiday_lag_1'] = (unique_dates_country
114
- # .apply(lambda x: x.date-pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
115
- #unique_dates_country['is_holiday_lag_2'] = (unique_dates_country
116
- # .apply(lambda x: x.date-pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
117
- #data = data.merge(unique_dates_country, on = ['date', 'Ticker'], validate = "m:1")
118
- #del unique_dates_country
119
- gc.collect()
120
- data.sample(5, random_state=30)
121
-
122
- train = data.iloc[:len(train)]
123
- test = data.iloc[len(train):]
124
-
125
- max_prediction_length = 2
126
- max_encoder_length = train.date.nunique()
127
- training_cutoff = train["time_idx"].max() - max_prediction_length #we will validate on 2020
128
-
129
- # Let's create a Dataset
130
- training = TimeSeriesDataSet(
131
- train[lambda x: x.time_idx <= training_cutoff],
132
- time_idx="time_idx",
133
- target="Close",
134
- group_ids=["Ticker"],
135
- min_encoder_length=max_prediction_length, # keep encoder length long (as it is in the validation set)
136
- max_encoder_length=max_encoder_length,
137
- max_prediction_length=max_prediction_length,
138
- static_categoricals=["Ticker"],
139
- time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
140
- #variable_groups={"is_holiday": ["is_holiday"]}, # group of categorical variables can be treated as one variable
141
- time_varying_known_reals=["time_idx"],
142
- time_varying_unknown_categoricals=[],
143
- time_varying_unknown_reals=[
144
- 'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','Ema5','Ema20','Ema50','Ema200'
145
- ],
146
- target_normalizer=GroupNormalizer(
147
- groups=['Ticker'], transformation="softplus"
148
- ), # use softplus and normalize by group
149
- categorical_encoders={
150
- 'week_of_year':NaNLabelEncoder(add_nan=True)
151
- },
152
- #lags={'num_sold': [7, 30, 365]},
153
- add_relative_time_idx=True,
154
- add_target_scales=True,
155
- add_encoder_length=True,
156
- )
157
-
158
- # create validation set (predict=True) which means to predict the last max_prediction_length points in time
159
- # for each series
160
- validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)
161
-
162
- # create dataloaders for model
163
- batch_size = 128 # set this between 32 to 128
164
- train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
165
- val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)
166
-
167
- #let's see how a naive model does
168
-
169
- actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])#.cuda()
170
- baseline_predictions = Baseline().predict(val_dataloader)#.cuda()
171
- (actuals - baseline_predictions).abs().mean().item()
172
-
173
- sm = SMAPE()
174
-
175
- print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")
176
-
177
- early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
178
- lr_logger = LearningRateMonitor() # log the learning rate
179
- logger = TensorBoardLogger("lightning_logs") # logging results to a tensorboard
180
-
181
- trainer = pl.Trainer(
182
- max_epochs=1,
183
- accelerator=ACCELERATOR,
184
- enable_model_summary=False,
185
- gradient_clip_val=0.25,
186
- limit_train_batches=10, # comment in for training, running validation every 30 batches
187
- #fast_dev_run=True, # comment in to check that network or dataset has no serious bugs
188
- callbacks=[lr_logger, early_stop_callback],
189
- logger=logger,
190
- )
191
-
192
- tft = TemporalFusionTransformer.from_dataset(
193
- training,
194
- learning_rate=LEARNING_RATE,
195
- lstm_layers=2,
196
- hidden_size=16,
197
- attention_head_size=2,
198
- dropout=0.2,
199
- hidden_continuous_size=8,
200
- output_size=1, # 7 quantiles by default
201
- loss=SMAPE(),
202
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
203
- reduce_on_plateau_patience=4
204
- )
205
-
206
- tft.to(DEVICE)
207
- trainer.fit(
208
- tft,
209
- train_dataloaders=train_dataloader,
210
- val_dataloaders=val_dataloader,
211
- )
212
- #torch.cuda.empty_cache()
213
- #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
214
-
215
- if OPTUNA:
216
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
217
-
218
- # create study
219
- study = optimize_hyperparameters(
220
- train_dataloader,
221
- val_dataloader,
222
- model_path="optuna_test",
223
- n_trials=5,
224
- max_epochs=MAX_EPOCHS,
225
- gradient_clip_val_range=(0.01, 0.3),
226
- hidden_size_range=(8, 24),
227
- hidden_continuous_size_range=(8, 12),
228
- attention_head_size_range=(2, 4),
229
- learning_rate_range=(0.01, 0.05),
230
- dropout_range=(0.1, 0.25),
231
- trainer_kwargs=dict(limit_train_batches=20),
232
- reduce_on_plateau_patience=4,
233
- pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_startup_trials=3),
234
- use_learning_rate_finder=False, # use Optuna to find ideal learning rate or use in-built learning rate finder
235
- )
236
- #torch.cuda.empty_cache()
237
- #'''
238
- trainer = pl.Trainer(
239
- max_epochs=MAX_EPOCHS,
240
- accelerator=ACCELERATOR,
241
- enable_model_summary=False,
242
- gradient_clip_val=study.best_params['gradient_clip_val'],
243
- limit_train_batches=20, # comment in for training, running validation every 30 batches
244
- #fast_dev_run=True, # comment in to check that network or dataset has no serious bugs
245
- callbacks=[lr_logger, early_stop_callback],
246
- logger=logger,
247
- )
248
-
249
- tft = TemporalFusionTransformer.from_dataset(
250
- training,
251
- learning_rate=study.best_params['learning_rate'],
252
- lstm_layers=2,
253
- hidden_size=study.best_params['hidden_size'],
254
- attention_head_size=study.best_params['attention_head_size'],
255
- dropout=study.best_params['dropout'],
256
- hidden_continuous_size=study.best_params['hidden_continuous_size'],
257
- output_size=1, # 7 quantiles by default
258
- loss=SMAPE(),
259
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
260
- reduce_on_plateau_patience=4
261
- )
262
-
263
- tft.to(DEVICE)
264
- trainer.fit(
265
- tft,
266
- train_dataloaders=train_dataloader,
267
- val_dataloaders=val_dataloader,
268
- )
269
- #'''
270
- #torch.cuda.empty_cache()
271
- best_model_path = trainer.checkpoint_callback.best_model_path
272
- best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
273
- actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])#.cuda()
274
- predictions = best_tft.predict(val_dataloader, mode="prediction")
275
- raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
276
-
277
- sm = SMAPE()
278
- print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions).mean(axis = 1).median().item()}")
279
- prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
280
- #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);
281
-
282
- print(raw_predictions[0][0])
283
- prax[3] = '-'
284
- prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
285
- t = prax[4]
286
- tm = data['Close'][len(data)-1]
287
- if(t-tm>0):
288
- prax[6] = 1
289
- elif(t-tm==0):
290
- prax[6] = 0
291
- else:
292
- prax[6] = -1
293
- #prax[i][3] = raw_predictions[0][0].data[1]
294
- print("-----------")
295
-
296
- #with open("out.csv", "w", newline="") as f:
297
- # writer = csv.writer(f)
298
- # writer.writerows(prax)
299
-
300
- # %%
301
- # Function to train the model (TFT)
302
- def modelTFT_OpenGap(csv_file, prax):
303
- train = csv_file
304
- #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
305
- train['date'] = pd.to_datetime(train['Date/Time'])
306
- #test['date'] = pd.to_datetime(test['Date'])
307
- datLength = len(train)
308
- # Opening gap: today's open minus the previous close (vectorized; avoids chained assignment)
- train['O-C'] = (train['Open'] - train['Close'].shift(1)).fillna(0)
315
- data = pd.concat([train], axis = 0, ignore_index=True)
316
- # Check that key is country-store-product-date combination
317
- #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
318
- # Check that there is one date per country-store-product combination
319
- #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()
320
-
321
- #display(train.sample(4))
322
-
323
- # Add a time_idx (an sequence of consecutive integers that goes from min to max date)
324
-
325
- data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
326
- .rename_axis('time_idx')).reset_index(), on = ['Date/Time']))
327
- # add additional features
328
- data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category") # categories have to be strings
329
- data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category") # categories have to be strings
330
- data["month"] = data['date'].dt.month.astype(str).astype("category") # categories have to be strings
331
- #data["log_num_sold"] = np.log(data.num_sold + 1e-8)
332
- #data["avg_volume_by_country"] = data.groupby(["time_idx", "country"], observed=True).num_sold.transform("mean")
333
- #data["avg_volume_by_store"] = data.groupby(["time_idx", "store"], observed=True).num_sold.transform("mean")
334
- #data["avg_volume_by_product"] = data.groupby(["time_idx", "product"], observed=True).num_sold.transform("mean")
335
-
336
- #unique_dates_country = data[['date', 'Ticker']].drop_duplicates(ignore_index = True)
337
- #unique_dates_country['is_holiday'] = (unique_dates_country
338
- # .apply(lambda x: x.date in holidays.country_holidays(x.country), axis = 1).astype('category'))
339
- #unique_dates_country['is_holiday_lead_1'] = (unique_dates_country
340
- # .apply(lambda x: x.date+pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
341
- #unique_dates_country['is_holiday_lead_2'] = (unique_dates_country
342
- # .apply(lambda x: x.date+pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
343
- #unique_dates_country['is_holiday_lag_1'] = (unique_dates_country
344
- # .apply(lambda x: x.date-pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
345
- #unique_dates_country['is_holiday_lag_2'] = (unique_dates_country
346
- # .apply(lambda x: x.date-pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
347
- #data = data.merge(unique_dates_country, on = ['date', 'Ticker'], validate = "m:1")
348
- #del unique_dates_country
349
- gc.collect()
350
- data.sample(5, random_state=30)
351
-
352
- train = data.iloc[:len(train)]
353
- test = data.iloc[len(train):]
354
-
355
- max_prediction_length = 2
356
- max_encoder_length = train.date.nunique()
357
- training_cutoff = train["time_idx"].max() - max_prediction_length # hold out the last max_prediction_length steps for validation
358
-
359
- # Let's create a Dataset
360
- training = TimeSeriesDataSet(
361
- train[lambda x: x.time_idx <= training_cutoff],
362
- time_idx="time_idx",
363
- target="Close",
364
- group_ids=["Ticker"],
365
- min_encoder_length=max_prediction_length, # keep encoder length long (as it is in the validation set)
366
- max_encoder_length=max_encoder_length,
367
- max_prediction_length=max_prediction_length,
368
- static_categoricals=["Ticker"],
369
- time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
370
- #variable_groups={"is_holiday": ["is_holiday"]}, # group of categorical variables can be treated as one variable
371
- time_varying_known_reals=["time_idx"],
372
- time_varying_unknown_categoricals=[],
373
- time_varying_unknown_reals=[
374
- 'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','Ema5','Ema20','Ema50','Ema200', 'O-C'
375
- ],
376
- target_normalizer=GroupNormalizer(
377
- groups=['Ticker'], transformation="softplus"
378
- ), # use softplus and normalize by group
379
- categorical_encoders={
380
- 'week_of_year':NaNLabelEncoder(add_nan=True)
381
- },
382
- #lags={'num_sold': [7, 30, 365]},
383
- add_relative_time_idx=True,
384
- add_target_scales=True,
385
- add_encoder_length=True,
386
- )
387
-
388
- # create validation set (predict=True) which means to predict the last max_prediction_length points in time
389
- # for each series
390
- validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)
391
-
392
- # create dataloaders for model
393
- batch_size = 128 # set this between 32 to 128
394
- train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
395
- val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)
396
-
397
- #let's see how a naive model does
398
-
399
- actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])#.cuda()
400
- baseline_predictions = Baseline().predict(val_dataloader)#.cuda()
401
- (actuals - baseline_predictions).abs().mean().item()
402
-
403
- sm = SMAPE()
404
-
405
- print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")
406
-
407
- early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
408
- lr_logger = LearningRateMonitor() # log the learning rate
409
- logger = TensorBoardLogger("lightning_logs") # logging results to a tensorboard
410
-
411
- trainer = pl.Trainer(
412
- max_epochs=1,
413
- accelerator=ACCELERATOR,
414
- enable_model_summary=False,
415
- gradient_clip_val=0.25,
416
- limit_train_batches=10, # comment in for training, running validation every 30 batches
417
- #fast_dev_run=True, # comment in to check that network or dataset has no serious bugs
418
- callbacks=[lr_logger, early_stop_callback],
419
- logger=logger,
420
- )
421
-
422
- tft = TemporalFusionTransformer.from_dataset(
423
- training,
424
- learning_rate=LEARNING_RATE,
425
- lstm_layers=2,
426
- hidden_size=16,
427
- attention_head_size=2,
428
- dropout=0.2,
429
- hidden_continuous_size=8,
430
- output_size=1, # single output; the default would be 7 quantiles
431
- loss=SMAPE(),
432
- log_interval=10, # log every 10 batches; set low when using the learning rate finder
433
- reduce_on_plateau_patience=4
434
- )
435
-
436
- tft.to(DEVICE)
437
- trainer.fit(
438
- tft,
439
- train_dataloaders=train_dataloader,
440
- val_dataloaders=val_dataloader,
441
- )
442
- #torch.cuda.empty_cache()
443
- #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
444
-
445
- if OPTUNA:
446
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
447
-
448
- # create study
449
- study = optimize_hyperparameters(
450
- train_dataloader,
451
- val_dataloader,
452
- model_path="optuna_test",
453
- n_trials=5,
454
- max_epochs=MAX_EPOCHS,
455
- gradient_clip_val_range=(0.01, 0.3),
456
- hidden_size_range=(8, 24),
457
- hidden_continuous_size_range=(8, 12),
458
- attention_head_size_range=(2, 4),
459
- learning_rate_range=(0.01, 0.05),
460
- dropout_range=(0.1, 0.25),
461
- trainer_kwargs=dict(limit_train_batches=20),
462
- reduce_on_plateau_patience=4,
463
- pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_warmup_steps=3),
464
- use_learning_rate_finder=False, # use Optuna to find ideal learning rate or use in-built learning rate finder
465
- )
466
- #torch.cuda.empty_cache()
467
- #'''
468
- trainer = pl.Trainer(
469
- max_epochs=MAX_EPOCHS,
470
- accelerator=ACCELERATOR,
471
- enable_model_summary=False,
472
- gradient_clip_val=study.best_params['gradient_clip_val'],
473
- limit_train_batches=20, # comment in for training, running validation every 30 batches
474
- #fast_dev_run=True, # comment in to check that network or dataset has no serious bugs
475
- callbacks=[lr_logger, early_stop_callback],
476
- logger=logger,
477
- )
478
-
479
- tft = TemporalFusionTransformer.from_dataset(
480
- training,
481
- learning_rate=study.best_params['learning_rate'],
482
- lstm_layers=2,
483
- hidden_size=study.best_params['hidden_size'],
484
- attention_head_size=study.best_params['attention_head_size'],
485
- dropout=study.best_params['dropout'],
486
- hidden_continuous_size=study.best_params['hidden_continuous_size'],
487
- output_size=1, # single output; the default would be 7 quantiles
488
- loss=SMAPE(),
489
- log_interval=10, # log every 10 batches; set low when using the learning rate finder
490
- reduce_on_plateau_patience=4
491
- )
492
-
493
- tft.to(DEVICE)
494
- trainer.fit(
495
- tft,
496
- train_dataloaders=train_dataloader,
497
- val_dataloaders=val_dataloader,
498
- )
499
- #'''
500
- #torch.cuda.empty_cache()
501
- best_model_path = trainer.checkpoint_callback.best_model_path
502
- best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
503
- actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])#.cuda()
504
- predictions = best_tft.predict(val_dataloader, mode="prediction")
505
- raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
506
-
507
- sm = SMAPE()
508
- print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions).mean(axis = 1).median().item()}")
509
- prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
510
- #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);
511
-
512
- print(raw_predictions[0][0])
513
- prax[3] = '-'
514
- prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
515
- t = prax[4]
516
- tm = data['Close'][len(data)-1]
517
- if(t-tm>0):
518
- prax[6] = 1
519
- elif(t-tm==0):
520
- prax[6] = 0
521
- else:
522
- prax[6] = -1
523
- #prax[i][3] = raw_predictions[0][0].data[1]
524
- print("-----------")
525
-
526
- #with open("out.csv", "w", newline="") as f:
527
- # writer = csv.writer(f)
528
- # writer.writerows(prax)
529
-
530
- # %%
531
- def generate_csv(data_list):
532
- today = date.today().strftime("%Y_%m_%d")
533
- filename = f"result_{today}.csv"
534
- file_exists = os.path.isfile(filename)
535
- with open(filename, mode='a', newline='') as csv_file:
536
- fieldnames = ['Ticker', 'Prev_Close_Real', 'Model', 'Prev_Close_Model', 'Close_Model', 'Max_Err', 'Up_Down' ] # replace with your own column names
537
- writer = csv.writer(csv_file, delimiter=',')
538
- if not file_exists:
539
- writer.writerow(fieldnames) # file doesn't exist yet, write a header
540
- writer.writerow(data_list)
541
- csv_file.close()
542
-
543
- def guess_date(string):
544
- for fmt in ["%Y/%m/%d", "%d-%m-%Y", "%Y%m%d", "%m/%d/%Y", "%d/%m/%Y", "%Y-%m-%d", "%d/%m/%y", "%m/%d/%y"]:
545
- try:
546
- return datetime.datetime.strptime(string, fmt).date()
547
- except ValueError:
548
- continue
549
- raise ValueError(string)
550
-
551
- # %%
552
- # Main function
553
- def main(files):
554
- # Get a list of all the CSV files uploaded
555
- prax = [0,0,0,0,0,0,0]
556
- for idx, file in enumerate(files):
557
- print(f"File #{idx+1}: {file}")
558
- print(file.name)
559
- df = pd.read_csv(file.name)
560
- print(df['Ticker'][0])
561
- prax[0] = df['Ticker'][0]
562
- prax[1] = df['Close'][len(df)-1]
563
- print('------------------')
564
- #df = df.drop(['EMARSI'], axis=1)
565
- #df['Date/Time'] = pd.to_datetime(df['Date/Time'])
566
- for i in range(len(df)):
567
- x = guess_date(df['Date/Time'][i])
568
- df['Date/Time'][i] = x.strftime("%Y-%m-%d")
569
- df['Date/Time'] = pd.to_datetime(df['Date/Time'])
570
- df.fillna(0, inplace=True)
571
- #df.to_csv('out.csv')
572
- modelTFT(df, prax)
573
- prax[2] = "TFT"
574
- generate_csv(prax)
575
- modelTFT_OpenGap(df, prax)
576
- prax[2] = "TFT_OpenGap"
577
- generate_csv(prax)
578
- # Generate blank line
579
- prax=["","","","","","",""]
580
- generate_csv(prax)
581
- # Reset prax
582
- prax = [0,0,0,0,0,0,0]
583
- today = date.today().strftime("%Y_%m_%d")
584
- return f"result_{today}.csv"
585
-
586
- gradioApp = gr.Interface(fn=main, inputs=gr.File(file_count="multiple"), outputs="file")
587
-
588
- if __name__ == "__main__":
589
- # Calling main function
590
- gradioApp.launch()
 
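In modelTFT_OpenGap above, the open-gap feature O-C is built with a row-by-row loop that uses chained indexing (train['O-C'][i] = ...), which is slow and raises pandas' SettingWithCopyWarning; the per-row guess_date loop in main has the same shape. A minimal vectorized sketch of the same steps, assuming the Open, Close and Date/Time columns used by the script (the date line is an approximation of guess_date's multi-format handling, not a drop-in replacement):

    import pandas as pd

    # open-to-previous-close gap; the first row is 0, matching the loop above
    train['O-C'] = (train['Open'] - train['Close'].shift(1)).fillna(0)

    # parse the mixed date formats in one pass (dayfirst=True is an assumption;
    # adjust it to the formats guess_date actually accepts)
    train['Date/Time'] = pd.to_datetime(train['Date/Time'], dayfirst=True, errors='coerce')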
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/indexed_datasets.py DELETED
@@ -1,77 +0,0 @@
1
- import pickle
2
- from copy import deepcopy
3
-
4
- import numpy as np
5
-
6
-
7
- class IndexedDataset:
8
- def __init__(self, path, num_cache=0):
9
- super().__init__()
10
- self.path = path
11
- self.data_file = None
12
- self.data_offsets = np.load(f"{path}.idx", allow_pickle=True).item()['offsets']
13
- self.data_file = open(f"{path}.data", 'rb', buffering=-1)
14
- # self.cache = []
15
- self.cache = {}
16
- self.num_cache = num_cache
17
-
18
- def check_index(self, i):
19
- if i < 0 or i >= len(self.data_offsets) - 1:
20
- raise IndexError('index out of range')
21
-
22
- def __del__(self):
23
- if self.data_file:
24
- self.data_file.close()
25
-
26
- def __getitem__(self, i):
27
- self.check_index(i)
28
-
29
- if self.num_cache > 0:
30
- if i in self.cache.keys():
31
- return self.cache[i]
32
- # for c in self.cache:
33
- # if c[0] == i:
34
- # return c[1]
35
- self.data_file.seek(self.data_offsets[i])
36
- b = self.data_file.read(self.data_offsets[i + 1] - self.data_offsets[i])
37
- item = pickle.loads(b)
38
- if self.num_cache > 0 and len(self.cache) < self.num_cache:
39
- if i not in self.cache.keys():
40
- self.cache[i] = deepcopy(item)
41
- # self.cache = [(i, deepcopy(item))] + self.cache[:-1]
42
- return item
43
-
44
- def __len__(self):
45
- return len(self.data_offsets) - 1
46
-
47
- class IndexedDatasetBuilder:
48
- def __init__(self, path):
49
- self.path = path
50
- self.out_file = open(f"{path}.data", 'wb')
51
- self.byte_offsets = [0]
52
-
53
- def add_item(self, item):
54
- s = pickle.dumps(item)
55
- bytes = self.out_file.write(s)
56
- self.byte_offsets.append(self.byte_offsets[-1] + bytes)
57
-
58
- def finalize(self):
59
- self.out_file.close()
60
- np.save(open(f"{self.path}.idx", 'wb'), {'offsets': self.byte_offsets})
61
-
62
-
63
- if __name__ == "__main__":
64
- import random
65
- from tqdm import tqdm
66
- ds_path = '/tmp/indexed_ds_example'
67
- size = 100
68
- items = [{"a": np.random.normal(size=[10000, 10]),
69
- "b": np.random.normal(size=[10000, 10])} for i in range(size)]
70
- builder = IndexedDatasetBuilder(ds_path)
71
- for i in tqdm(range(size)):
72
- builder.add_item(items[i])
73
- builder.finalize()
74
- ds = IndexedDataset(ds_path)
75
- for i in tqdm(range(10000)):
76
- idx = random.randint(0, size - 1)
77
- assert (ds[idx]['a'] == items[idx]['a']).all()
 
spaces/AISuperheroes/03GR-Chatbot-Memory/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 03GR Chatbot Memory
3
- emoji: ⚡
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.6
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ASJMO/freegpt/g4f/Provider/Provider.py DELETED
@@ -1,16 +0,0 @@
1
- import os
2
- from ..typing import sha256, Dict, get_type_hints
3
-
4
- url = None
5
- model = None
6
- supports_stream = False
7
- needs_auth = False
8
-
9
-
10
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
11
- return
12
-
13
-
14
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
15
- '(%s)' % ', '.join(
16
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py DELETED
@@ -1,19 +0,0 @@
1
- _base_ = './yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py'
2
-
3
- model = dict(
4
- backbone=dict(arch='E'),
5
- neck=dict(
6
- use_maxpool_in_downsample=True,
7
- use_in_channels_in_downsample=True,
8
- block_cfg=dict(
9
- type='ELANBlock',
10
- middle_ratio=0.4,
11
- block_ratio=0.2,
12
- num_blocks=6,
13
- num_convs_in_block=1),
14
- in_channels=[320, 640, 960, 1280],
15
- out_channels=[160, 320, 480, 640]),
16
- bbox_head=dict(
17
- head_module=dict(
18
- in_channels=[160, 320, 480, 640],
19
- main_out_channels=[320, 640, 960, 1280])))
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192.py DELETED
@@ -1,172 +0,0 @@
1
- _base_ = [
2
- '../../../_base_/default_runtime.py',
3
- '../../../_base_/datasets/deepfashion2.py'
4
- ]
5
-
6
- default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
7
-
8
- resume = False # resume training from a checkpoint
9
- load_from = None # model weights to load
10
- train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10) # number of training epochs and validation interval
11
- param_scheduler = [
12
- dict( # warmup strategy
13
- type='LinearLR',
14
- begin=0,
15
- end=500,
16
- start_factor=0.001,
17
- by_epoch=False),
18
- dict( # scheduler
19
- type='MultiStepLR',
20
- begin=0,
21
- end=120,
22
- milestones=[80, 100],
23
- gamma=0.1,
24
- by_epoch=True)
25
- ]
26
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # optimizer and learning rate
27
- auto_scale_lr = dict(base_batch_size=512) # automatically scale the learning rate with the batch size
28
-
29
- backend_args = dict(backend='local') # data loading backend; loads from the local disk by default
30
- dataset_type = 'DeepFashion2Dataset' # dataset class name
31
- data_mode = 'topdown' # algorithm type; determines how annotation information is loaded
32
- data_root = 'data/deepfashion2/' # data root path
33
- # define the codec, used to generate targets and decode predictions; it also holds the input image and output heatmap sizes
34
- codec = dict(
35
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
36
-
37
- train_pipeline = [
38
- dict(type='LoadImage'),
39
- dict(type='GetBBoxCenterScale'),
40
- dict(type='RandomFlip', direction='horizontal'),
41
- dict(
42
- type='RandomBBoxTransform',
43
- shift_prob=0,
44
- rotate_factor=60,
45
- scale_factor=(0.75, 1.25)),
46
- dict(type='TopdownAffine', input_size=codec['input_size']),
47
- dict(type='GenerateTarget', encoder=codec),
48
- dict(type='PackPoseInputs')
49
- ]
50
- val_pipeline = [ # test-time data transforms
52
- dict(type='LoadImage', backend_args=backend_args), # load the image
53
- dict(type='GetBBoxCenterScale'), # get center and scale from the bbox
54
- dict(type='TopdownAffine', input_size=codec['input_size']), # update the target data according to the transform matrix
55
- dict(type='PackPoseInputs') # pack the targets for training
55
- ]
56
- train_dataloader = dict( # training data loader
57
- batch_size=64, # batch size
58
- num_workers=6, # number of data-loading worker processes
59
- persistent_workers=True, # keep workers alive while idle to avoid the cost of repeatedly restarting them
60
- sampler=dict(type='DefaultSampler', shuffle=True), # sampling strategy; shuffle the data
61
- dataset=dict(
62
- type=dataset_type, # dataset class name
63
- data_root=data_root, # dataset path
64
- data_mode=data_mode, # algorithm type
65
- ann_file='train/deepfashion2_skirt.json', # annotation file path
66
- data_prefix=dict(img='train/image/'), # image path
67
- pipeline=train_pipeline # data pipeline
68
- ))
69
- val_dataloader = dict(
70
- batch_size=32,
71
- num_workers=6,
72
- persistent_workers=True, # keep workers alive while idle to avoid the cost of repeatedly restarting them
73
- drop_last=False,
74
- sampler=dict(type='DefaultSampler', shuffle=False), # sampling strategy; no shuffling
75
- dataset=dict(
76
- type=dataset_type, # dataset class name
77
- data_root=data_root, # dataset path
78
- data_mode=data_mode, # algorithm type
79
- ann_file='validation/deepfashion2_skirt.json', # annotation file path
80
- data_prefix=dict(img='validation/image/'), # image path
81
- test_mode=True, # test-mode switch
82
- pipeline=val_pipeline # data pipeline
83
- ))
84
- test_dataloader = val_dataloader # by default the validation and test sets are not distinguished; define them separately if needed
85
-
86
- channel_cfg = dict(
87
- num_output_channels=294,
88
- dataset_joints=294,
89
- dataset_channel=[
90
- [
91
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
92
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
93
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
94
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
95
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
96
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
97
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
98
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
99
- 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
100
- 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
101
- 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
102
- 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
103
- 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
104
- 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
105
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
106
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
107
- 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
108
- 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
109
- 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
110
- 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
111
- 285, 286, 287, 288, 289, 290, 291, 292, 293
112
- ],
113
- ],
114
- inference_channel=[
115
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
116
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
117
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
118
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
119
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
120
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
121
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
122
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
123
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
124
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
125
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
126
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
127
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
128
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
129
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
130
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
131
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
132
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
133
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
134
- 290, 291, 292, 293
135
- ])
136
-
137
- model = dict(
138
- type='TopdownPoseEstimator', # the model type determines the algorithm pipeline
139
- data_preprocessor=dict( # data normalization and channel-order conversion, done as part of the model
140
- type='PoseDataPreprocessor',
141
- mean=[123.675, 116.28, 103.53],
142
- std=[58.395, 57.12, 57.375],
143
- bgr_to_rgb=True),
144
- backbone=dict(
145
- type='ResNet',
146
- depth=50,
147
- init_cfg=dict(
148
- type='Pretrained', # pretrained weights; only the backbone weights are loaded, for transfer learning
149
- checkpoint='torchvision://resnet50')),
150
- head=dict( # model head
151
- type='HeatmapHead',
152
- in_channels=2048,
153
- out_channels=channel_cfg['num_output_channels'],
154
- # deconv_out_channels=None,
155
- loss=dict(type='KeypointMSELoss', use_target_weight=True), # loss function
156
- decoder=codec), # decoder; converts heatmaps into coordinates
157
- test_cfg=dict(
158
- flip_test=True, # enable horizontal-flip test-time augmentation
159
- flip_mode='heatmap', # flip the heatmap
160
- shift_heatmap=True, # shift the flipped result to improve accuracy
161
- ))
162
-
163
- val_evaluator = [
164
- dict(type='PCKAccuracy', thr=0.2),
165
- dict(type='AUC'),
166
- dict(type='EPE'),
167
- ]
168
- test_evaluator = val_evaluator # by default the validation and test sets are not distinguished; define them separately if needed
169
-
170
- visualizer = dict(
171
- vis_backends=[dict(type='LocalVisBackend'),
172
- dict(type='WandbVisBackend')])
 
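The deleted keypoint config above follows the standard MMPose/MMEngine config convention, so it can be loaded and tweaked programmatically rather than edited by hand. A minimal sketch, assuming mmengine is installed and the full configs/ tree (including the _base_ files it references) is available at the path from the diff header:

    from mmengine.config import Config

    # load the top-down ResNet-50 skirt config and inspect a few fields
    cfg = Config.fromfile(
        'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/'
        'topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192.py')
    print(cfg.train_cfg['max_epochs'])   # 120
    print(cfg.codec['input_size'])       # (192, 256)

    # example override: train with a smaller batch size
    cfg.train_dataloader['batch_size'] = 32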
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_mixup.py DELETED
@@ -1,17 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='ImageClassifier',
4
- backbone=dict(
5
- type='ResNet',
6
- depth=50,
7
- num_stages=4,
8
- out_indices=(3, ),
9
- style='pytorch'),
10
- neck=dict(type='GlobalAveragePooling'),
11
- head=dict(
12
- type='MultiLabelLinearClsHead',
13
- num_classes=1000,
14
- in_channels=2048,
15
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)),
16
- train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)),
17
- )
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Theb.py DELETED
@@ -1,97 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import random
5
-
6
- import requests
7
-
8
- from ...typing import Any, CreateResult
9
- from ..base_provider import BaseProvider
10
-
11
-
12
- class Theb(BaseProvider):
13
- url = "https://theb.ai"
14
- working = True
15
- supports_stream = True
16
- supports_gpt_35_turbo = True
17
- needs_auth = True
18
-
19
- @staticmethod
20
- def create_completion(
21
- model: str,
22
- messages: list[dict[str, str]],
23
- stream: bool, **kwargs: Any) -> CreateResult:
24
-
25
- conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
26
- conversation += "\nassistant: "
27
-
28
- auth = kwargs.get("auth", {
29
- "bearer_token":"free",
30
- "org_id":"theb",
31
- })
32
-
33
- bearer_token = auth["bearer_token"]
34
- org_id = auth["org_id"]
35
-
36
- headers = {
37
- 'authority' : 'beta.theb.ai',
38
- 'accept' : 'text/event-stream',
39
- 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
40
- 'authorization' : 'Bearer '+bearer_token,
41
- 'content-type' : 'application/json',
42
- 'origin' : 'https://beta.theb.ai',
43
- 'referer' : 'https://beta.theb.ai/home',
44
- 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
45
- 'sec-ch-ua-mobile' : '?0',
46
- 'sec-ch-ua-platform': '"Windows"',
47
- 'sec-fetch-dest' : 'empty',
48
- 'sec-fetch-mode' : 'cors',
49
- 'sec-fetch-site' : 'same-origin',
50
- 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
51
- 'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8',
52
- }
53
-
54
- req_rand = random.randint(100000000, 9999999999)
55
-
56
- json_data: dict[str, Any] = {
57
- "text" : conversation,
58
- "category" : "04f58f64a4aa4191a957b47290fee864",
59
- "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
60
- "model_params": {
61
- "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
62
- "temperature" : kwargs.get("temperature", 1),
63
- "top_p" : kwargs.get("top_p", 1),
64
- "frequency_penalty" : kwargs.get("frequency_penalty", 0),
65
- "presence_penalty" : kwargs.get("presence_penalty", 0),
66
- "long_term_memory" : "auto"
67
- }
68
- }
69
-
70
- response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
71
- headers=headers, json=json_data, stream=True)
72
-
73
- response.raise_for_status()
74
- content = ""
75
- next_content = ""
76
- for chunk in response.iter_lines():
77
- if b"content" in chunk:
78
- next_content = content
79
- data = json.loads(chunk.decode().split("data: ")[1])
80
- content = data["content"]
81
- yield data["content"].replace(next_content, "")
82
-
83
- @classmethod
84
- @property
85
- def params(cls):
86
- params = [
87
- ("model", "str"),
88
- ("messages", "list[dict[str, str]]"),
89
- ("auth", "list[dict[str, str]]"),
90
- ("stream", "bool"),
91
- ("temperature", "float"),
92
- ("presence_penalty", "int"),
93
- ("frequency_penalty", "int"),
94
- ("top_p", "int")
95
- ]
96
- param = ", ".join([": ".join(p) for p in params])
97
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
spaces/AchyuthGamer/text-to-speech-client/assets/index-5644c887.css DELETED
@@ -1 +0,0 @@
1
- *,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 
0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.static{position:static}.absolute{position:absolute}.relative{position:relative}.left-0{left:0}.top-0{top:0}.z-10{z-index:10}.z-50{z-index:50}.m-2{margin:.5rem}.my-4{margin-top:1rem;margin-bottom:1rem}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.block{display:block}.flex{display:flex}.h-14{height:3.5rem}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-\[1\%\]{width:1%}.w-full{width:100%}.max-w-xl{max-width:36rem}.cursor-not-allowed{cursor:not-allowed}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.gap-1{gap:.25rem}.overflow-hidden{overflow:hidden}.whitespace-nowrap{white-space:nowrap}.rounded-lg{border-radius:.5rem}.rounded-md{border-radius:.375rem}.border{border-width:1px}.border-gray-300{--tw-border-opacity: 1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}.bg-blue-500{--tw-bg-opacity: 1;background-color:rgb(59 130 246 / var(--tw-bg-opacity))}.bg-gray-100{--tw-bg-opacity: 1;background-color:rgb(243 244 246 / var(--tw-bg-opacity))}.bg-gray-400{--tw-bg-opacity: 1;background-color:rgb(156 163 175 / var(--tw-bg-opacity))}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity))}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-8{padding:2rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-4{padding-left:1rem;padding-right:1rem}.px-8{padding-left:2rem;padding-right:2rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.text-left{text-align:left}.text-center{text-align:center}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-base{font-size:1rem;line-height:1.5rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.font-medium{font-weight:500}.font-semibold{font-weight:600}.text-black{--tw-text-opacity: 1;color:rgb(0 0 0 / var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity: 1;color:rgb(75 85 99 / var(--tw-text-opacity))}.text-gray-700{--tw-text-opacity: 1;color:rgb(55 65 81 / var(--tw-text-opacity))}.text-gray-800{--tw-text-opacity: 1;color:rgb(31 41 55 / var(--tw-text-opacity))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.shadow-lg{--tw-shadow: 0 10px 15px -3px rgb(0 0 0 / .1), 0 4px 6px -4px rgb(0 0 0 / .1);--tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-xl{--tw-shadow: 0 20px 25px -5px rgb(0 0 0 / .1), 0 8px 10px -6px rgb(0 0 0 / .1);--tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px 
var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-black\/5{--tw-shadow-color: rgb(0 0 0 / .05);--tw-shadow: var(--tw-shadow-colored)}.ring-1{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.ring-slate-700\/10{--tw-ring-color: rgb(51 65 85 / .1)}.blur{--tw-blur: blur(8px);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}:root{font-family:Inter,system-ui,Avenir,Helvetica,Arial,sans-serif;line-height:1.5;font-weight:400;color:#213547;background-color:#fff;font-synthesis:none;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;-webkit-text-size-adjust:100%}audio::-webkit-media-controls-panel{background-color:#fff}.hover\:bg-blue-600:hover{--tw-bg-opacity: 1;background-color:rgb(37 99 235 / var(--tw-bg-opacity))}
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/AddChildrenMap.js DELETED
@@ -1,6 +0,0 @@
1
- var AddChildrenMap = function (key, gameObject) {
2
- this.childrenMap[key] = gameObject;
3
- return this;
4
- }
5
-
6
- export default AddChildrenMap;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/TableSetInteractive.js DELETED
@@ -1,19 +0,0 @@
1
- import PointerUpDownCell from './PointerUpDownCell.js';
2
- import OverCell from './OverCell.js';
3
- import ClickCell from './ClickCell.js';
4
- import TapCell from './TapCell.js';
5
- import PressCell from './PressCell.js';
6
- import SwipeCell from './SwipeCell.js';
7
-
8
- var TableSetInteractive = function (table, tableConfig) {
9
- table.setInteractive();
10
-
11
- PointerUpDownCell.call(this, table, tableConfig);
12
- OverCell.call(this, table, tableConfig);
13
- ClickCell.call(this, table, tableConfig);
14
- TapCell.call(this, table, tableConfig);
15
- PressCell.call(this, table, tableConfig);
16
- SwipeCell.call(this, table, tableConfig);
17
- }
18
-
19
- export default TableSetInteractive;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/SimpleDropDownList.js DELETED
@@ -1,27 +0,0 @@
1
- import DropDownList from '../dropdownlist/DropDownList.js';
2
- import BuildListConfig from '../utils/build/BuildListConfig.js';
3
-
4
- class SimpleDropDownList extends DropDownList {
5
- constructor(scene, config, creators) {
6
- config = BuildListConfig(scene, config, creators);
7
- super(scene, config);
8
- this.type = 'rexSimpleDropDownList';
9
- }
10
-
11
- setOptions(options) {
12
- if (options === undefined) {
13
- options = [];
14
- }
15
- for (var i = 0, cnt = options.length; i < cnt; i++) {
16
- var option = options[i];
17
- if (typeof (option) === 'string') {
18
- options[i] = { text: option, value: option };
19
- }
20
- }
21
- super.setOptions(options);
22
- return this;
23
- }
24
-
25
- }
26
-
27
- export default SimpleDropDownList;
 
spaces/Alashazam/Harmony/app.py DELETED
@@ -1,45 +0,0 @@
1
- import gradio
2
-
3
- class Model:
4
- def __init__(self, name, path="", prefix=""):
5
- self.name = name
6
- self.path = path
7
- self.prefix = prefix
8
-
9
- models = [
10
- Model("Marvel","models/ItsJayQz/Marvel_WhatIf_Diffusion", "whatif style"),
11
- Model("Cyberpunk Anime Diffusion", "models/DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style"),
12
- Model("Portrait plus", "models/wavymulder/portraitplus", "portrait+ style"),
13
- Model("CF25", "models/gsdf/Counterfeit-V2.5", "anime style"),
14
- Model("vintedois", "models/22h/vintedois-diffusion-v0-1", "vintedois style"),
15
- Model("dreamlike", "models/dreamlike-art/dreamlike-diffusion-1.0","dreamlike style"),
16
- #Model("Orange Mix","models/WarriorMama777/OrangeMixs", "OrangeMixs style"),
17
- Model("GTA5","models/ItsJayQz/GTA5_Artwork_Diffusion", "GTA5 style")
18
- ]
19
-
20
- model1=[]
21
- model2=[]
22
- model3=[]
23
-
24
- for i in range(len(models)):
25
- model3.append(models[i].name)
26
- model1.append(gradio.Interface.load(models[i].path))
27
- model2.append(models[i].prefix)
28
-
29
- def process1(prompt, modelSelected):
30
- if (modelSelected==''):
31
- modelSelected = "Marvel"
32
- model_idx=model3.index(modelSelected)
33
- prompt+=", in "+model2[model_idx]
34
- image_return = model1[model_idx](prompt)
35
- return image_return
36
-
37
- sandbox = gradio.Interface(fn=process1,
38
- inputs=[gradio.Textbox(label="Enter Prompt:"), gradio.Dropdown(model3)],
39
- outputs=[gradio.Image(label="Produced Image")],
40
- title='Text to Image',
41
- examples=[["Portrait close up, Elvis Presley, concert hall in the background", "GTA5"],
42
- ["Marvel Blackwidow portrait close up. building city background", "Marvel"],
43
- ["A white rabbit wizard, Hogwart University, Castle in the background", "dreamlike"]])
44
-
45
- sandbox.queue(concurrency_count=20).launch()
 
spaces/AlhitawiMohammed22/HTD_HTR/app.py DELETED
@@ -1,145 +0,0 @@
1
- import os
2
- os.environ["USE_TORCH"] = "1"
3
- os.environ["USE_TF"] = "0"
4
- import torch
5
- from torch.utils.data.dataloader import DataLoader
6
-
7
- from builder import DocumentBuilder
8
- from trocr import IAMDataset, device, get_processor_model
9
- from doctr.utils.visualization import visualize_page
10
- from doctr.models.predictor.base import _OCRPredictor
11
- from doctr.models.detection.predictor import DetectionPredictor
12
- from doctr.models.preprocessor import PreProcessor
13
- from doctr.models import db_resnet50, db_mobilenet_v3_large
14
-
15
- from doctr.io import DocumentFile
16
- import numpy as np
17
- import cv2
18
- import matplotlib.pyplot as plt
19
- import streamlit as st
20
-
21
- DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large"]
22
- RECO_ARCHS = ["microsoft/trocr-large-printed", "microsoft/trocr-large-stage1", "microsoft/trocr-large-handwritten"]
23
-
24
-
25
- def main():
26
- # Wide mode
27
- st.set_page_config(layout="wide")
28
- # Designing the interface
29
- st.title("docTR + TrOCR")
30
- # For newline
31
- st.write('\n')
32
- #
33
- st.write('For Detection DocTR: https://github.com/mindee/doctr')
34
- # For newline
35
- st.write('\n')
36
- st.write('For Recognition TrOCR: https://github.com/microsoft/unilm/tree/master/trocr')
37
- # For newline
38
- st.write('\n')
39
-
40
- st.write('Any Issue please dm')
41
- # For newline
42
- st.write('\n')
43
- # Instructions
44
- st.markdown(
45
- "*Hint: click on the top-right corner of an image to enlarge it!*")
46
- # Set the columns
47
- cols = st.columns((1, 1, 1))
48
- cols[0].subheader("Input page")
49
- cols[1].subheader("Segmentation heatmap")
50
-
51
- # Sidebar
52
- # File selection
53
- st.sidebar.title("Document selection")
54
- # Disabling warning
55
- st.set_option('deprecation.showfileUploaderEncoding', False)
56
- # Choose your own image
57
- uploaded_file = st.sidebar.file_uploader(
58
- "Upload files", type=['pdf', 'png', 'jpeg', 'jpg'])
59
- if uploaded_file is not None:
60
- if uploaded_file.name.endswith('.pdf'):
61
- doc = DocumentFile.from_pdf(uploaded_file.read()).as_images()
62
- else:
63
- doc = DocumentFile.from_images(uploaded_file.read())
64
- page_idx = st.sidebar.selectbox(
65
- "Page selection", [idx + 1 for idx in range(len(doc))]) - 1
66
- cols[0].image(doc[page_idx])
67
- # Model selection
68
- st.sidebar.title("Model selection")
69
- det_arch = st.sidebar.selectbox("Text detection model", DET_ARCHS)
70
- rec_arch = st.sidebar.selectbox("Text recognition model", RECO_ARCHS)
71
- # For newline
72
- st.sidebar.write('\n')
73
- if st.sidebar.button("Analyze page"):
74
- if uploaded_file is None:
75
- st.sidebar.write("Please upload a document")
76
- else:
77
- with st.spinner('Loading model...'):
78
- if det_arch == "db_resnet50":
79
- det_model = db_resnet50(pretrained=True)
80
- else:
81
- det_model = db_mobilenet_v3_large(pretrained=True)
82
- det_predictor = DetectionPredictor(PreProcessor((1024, 1024), batch_size=1, mean=(0.798, 0.785, 0.772), std=(0.264, 0.2749, 0.287)), det_model)
83
- rec_processor, rec_model = get_processor_model(rec_arch)
84
- with st.spinner('Analyzing...'):
85
- # Forward the image to the model
86
- processed_batches = det_predictor.pre_processor([doc[page_idx]])
87
- out = det_predictor.model(processed_batches[0], return_model_output=True)
88
- seg_map = out["out_map"]
89
- seg_map = torch.squeeze(seg_map[0, ...], axis=0)
90
- seg_map = cv2.resize(seg_map.detach().numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),
91
- interpolation=cv2.INTER_LINEAR)
92
- # Plot the raw heatmap
93
- fig, ax = plt.subplots()
94
- ax.imshow(seg_map)
95
- ax.axis('off')
96
- cols[1].pyplot(fig)
97
-
98
- # Plot OCR output
99
- # Localize text elements
100
- loc_preds = out["preds"]
101
-
102
- # Check whether crop mode should be switched to channels first
103
- channels_last = len(doc) == 0 or isinstance(doc[0], np.ndarray)
104
-
105
- # Crop images
106
- crops, loc_preds = _OCRPredictor._prepare_crops(
107
- doc, loc_preds, channels_last=channels_last, assume_straight_pages=True
108
- )
109
-
110
- test_dataset = IAMDataset(crops[0], rec_processor)
111
- test_dataloader = DataLoader(test_dataset, batch_size=16)
112
-
113
- text = []
114
- with torch.no_grad():
115
- for batch in test_dataloader:
116
- pixel_values = batch["pixel_values"].to(device)
117
- generated_ids = rec_model.generate(pixel_values)
118
- generated_text = rec_processor.batch_decode(
119
- generated_ids, skip_special_tokens=True)
120
- text.extend(generated_text)
121
- boxes, text_preds = _OCRPredictor._process_predictions(
122
- loc_preds, text)
123
-
124
- doc_builder = DocumentBuilder()
125
- out = doc_builder(
126
- boxes,
127
- text_preds,
128
- [
129
- # type: ignore[misc]
130
- page.shape[:2] if channels_last else page.shape[-2:]
131
- for page in [doc[page_idx]]
132
- ]
133
- )
134
-
135
- for df in out:
136
- st.markdown("text")
137
- st.write(" ".join(df["word"].to_list()))
138
- st.write('\n')
139
- st.markdown("\n Dataframe Output- similar to Tesseract:")
140
- st.dataframe(df)
141
-
142
-
143
-
144
- if __name__ == '__main__':
145
- main()
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models_face.py DELETED
@@ -1,819 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- import math
4
- import random
5
- import functools
6
- import operator
7
-
8
- import torch
9
- from torch import nn
10
- from torch.nn import functional as F
11
- import torch.nn.init as init
12
- from torch.autograd import Function
13
-
14
- from .op_edit import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
15
-
16
-
17
- class PixelNorm(nn.Module):
18
- def __init__(self):
19
- super().__init__()
20
-
21
- def forward(self, input):
22
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
23
-
24
-
25
- def make_kernel(k):
26
- k = torch.tensor(k, dtype=torch.float32)
27
-
28
- if k.ndim == 1:
29
- k = k[None, :] * k[:, None]
30
-
31
- k /= k.sum()
32
-
33
- return k
34
-
35
-
36
- class Upsample(nn.Module):
37
- def __init__(self, kernel, factor=2):
38
- super().__init__()
39
-
40
- self.factor = factor
41
- kernel = make_kernel(kernel) * (factor ** 2)
42
- self.register_buffer("kernel", kernel)
43
-
44
- p = kernel.shape[0] - factor
45
-
46
- pad0 = (p + 1) // 2 + factor - 1
47
- pad1 = p // 2
48
-
49
- self.pad = (pad0, pad1)
50
-
51
- def forward(self, input):
52
- out = upfirdn2d(input, self.kernel, up=self.factor,
53
- down=1, pad=self.pad)
54
-
55
- return out
56
-
57
-
58
- class Downsample(nn.Module):
59
- def __init__(self, kernel, factor=2):
60
- super().__init__()
61
-
62
- self.factor = factor
63
- kernel = make_kernel(kernel)
64
- self.register_buffer("kernel", kernel)
65
-
66
- p = kernel.shape[0] - factor
67
-
68
- pad0 = (p + 1) // 2
69
- pad1 = p // 2
70
-
71
- self.pad = (pad0, pad1)
72
-
73
- def forward(self, input):
74
- out = upfirdn2d(input, self.kernel, up=1,
75
- down=self.factor, pad=self.pad)
76
-
77
- return out
78
-
79
-
80
- class Blur(nn.Module):
81
- def __init__(self, kernel, pad, upsample_factor=1):
82
- super().__init__()
83
-
84
- kernel = make_kernel(kernel)
85
-
86
- if upsample_factor > 1:
87
- kernel = kernel * (upsample_factor ** 2)
88
-
89
- self.register_buffer("kernel", kernel)
90
-
91
- self.pad = pad
92
-
93
- def forward(self, input):
94
- out = upfirdn2d(input, self.kernel, pad=self.pad)
95
-
96
- return out
97
-
98
-
99
- class EqualConv2d(nn.Module):
100
- def __init__(
101
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
102
- ):
103
- super().__init__()
104
-
105
- self.weight = nn.Parameter(
106
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
107
- )
108
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
109
-
110
- self.stride = stride
111
- self.padding = padding
112
-
113
- if bias:
114
- self.bias = nn.Parameter(torch.zeros(out_channel))
115
-
116
- else:
117
- self.bias = None
118
-
119
- def forward(self, input):
120
- out = F.conv2d(
121
- input,
122
- self.weight * self.scale,
123
- bias=self.bias,
124
- stride=self.stride,
125
- padding=self.padding,
126
- )
127
-
128
- return out
129
-
130
- def __repr__(self):
131
- return (
132
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
133
- f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
134
- )
135
-
136
-
137
- class EqualLinear(nn.Module):
138
- def __init__(
139
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
140
- ):
141
- super().__init__()
142
-
143
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
144
-
145
- if bias:
146
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
147
-
148
- else:
149
- self.bias = None
150
-
151
- self.activation = activation
152
-
153
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
154
- self.lr_mul = lr_mul
155
-
156
- def forward(self, input):
157
- if self.activation:
158
- out = F.linear(input, self.weight * self.scale)
159
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
160
-
161
- else:
162
- out = F.linear(
163
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
164
- )
165
-
166
- return out
167
-
168
- def __repr__(self):
169
- return (
170
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
171
- )
172
-
173
-
174
- class ScaledLeakyReLU(nn.Module):
175
- def __init__(self, negative_slope=0.2):
176
- super().__init__()
177
-
178
- self.negative_slope = negative_slope
179
-
180
- def forward(self, input):
181
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
182
-
183
- return out * math.sqrt(2)
184
-
185
-
186
- class ModulatedConv2d(nn.Module):
187
- def __init__(
188
- self,
189
- in_channel,
190
- out_channel,
191
- kernel_size,
192
- style_dim,
193
- demodulate=True,
194
- upsample=False,
195
- downsample=False,
196
- blur_kernel=[1, 3, 3, 1],
197
- ):
198
- super().__init__()
199
-
200
- self.eps = 1e-8
201
- self.kernel_size = kernel_size
202
- self.in_channel = in_channel
203
- self.out_channel = out_channel
204
- self.upsample = upsample
205
- self.downsample = downsample
206
-
207
- if upsample:
208
- factor = 2
209
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
210
- pad0 = (p + 1) // 2 + factor - 1
211
- pad1 = p // 2 + 1
212
-
213
- self.blur = Blur(blur_kernel, pad=(
214
- pad0, pad1), upsample_factor=factor)
215
-
216
- if downsample:
217
- factor = 2
218
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
219
- pad0 = (p + 1) // 2
220
- pad1 = p // 2
221
-
222
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
223
-
224
- fan_in = in_channel * kernel_size ** 2
225
- self.scale = 1 / math.sqrt(fan_in)
226
- self.padding = kernel_size // 2
227
-
228
- self.weight = nn.Parameter(
229
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
230
- )
231
-
232
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
233
-
234
- self.demodulate = demodulate
235
-
236
- def __repr__(self):
237
- return (
238
- f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
239
- f"upsample={self.upsample}, downsample={self.downsample})"
240
- )
241
-
242
- def forward(self, input, style):
243
- batch, in_channel, height, width = input.shape
244
-
245
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
246
- weight = self.scale * self.weight * style
247
-
248
- if self.demodulate:
249
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
250
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
251
-
252
- weight = weight.view(
253
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
254
- )
255
-
256
- if self.upsample:
257
- input = input.view(1, batch * in_channel, height, width)
258
- weight = weight.view(
259
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
260
- )
261
- weight = weight.transpose(1, 2).reshape(
262
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
263
- )
264
- out = F.conv_transpose2d(
265
- input, weight, padding=0, stride=2, groups=batch)
266
- _, _, height, width = out.shape
267
- out = out.view(batch, self.out_channel, height, width)
268
- out = self.blur(out)
269
-
270
- elif self.downsample:
271
- input = self.blur(input)
272
- _, _, height, width = input.shape
273
- input = input.view(1, batch * in_channel, height, width)
274
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
275
- _, _, height, width = out.shape
276
- out = out.view(batch, self.out_channel, height, width)
277
-
278
- else:
279
- input = input.view(1, batch * in_channel, height, width)
280
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
281
- _, _, height, width = out.shape
282
- out = out.view(batch, self.out_channel, height, width)
283
-
284
- return out
285
-
286
-
287
- class NoiseInjection(nn.Module):
288
- def __init__(self):
289
- super().__init__()
290
-
291
- self.weight = nn.Parameter(torch.zeros(1))
292
-
293
- def forward(self, image, noise=None):
294
- if noise is None:
295
- batch, _, height, width = image.shape
296
- noise = image.new_empty(batch, 1, height, width).normal_()
297
-
298
- return image + self.weight * noise
299
-
300
-
301
- class ConstantInput(nn.Module):
302
- def __init__(self, channel, size=4):
303
- super().__init__()
304
-
305
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
306
-
307
- def forward(self, input):
308
- batch = input.shape[0]
309
- out = self.input.repeat(batch, 1, 1, 1)
310
-
311
- return out
312
-
313
-
314
- class StyledConv(nn.Module):
315
- def __init__(
316
- self,
317
- in_channel,
318
- out_channel,
319
- kernel_size,
320
- style_dim,
321
- upsample=False,
322
- blur_kernel=[1, 3, 3, 1],
323
- demodulate=True,
324
- ):
325
- super().__init__()
326
-
327
- self.conv = ModulatedConv2d(
328
- in_channel,
329
- out_channel,
330
- kernel_size,
331
- style_dim,
332
- upsample=upsample,
333
- blur_kernel=blur_kernel,
334
- demodulate=demodulate,
335
- )
336
-
337
- self.noise = NoiseInjection()
338
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
339
- # self.activate = ScaledLeakyReLU(0.2)
340
- self.activate = FusedLeakyReLU(out_channel)
341
-
342
- def forward(self, input, style, noise=None):
343
- out = self.conv(input, style)
344
- out = self.noise(out, noise=noise)
345
- # out = out + self.bias
346
- out = self.activate(out)
347
-
348
- return out
349
-
350
-
351
- class ToRGB(nn.Module):
352
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
353
- super().__init__()
354
-
355
- if upsample:
356
- self.upsample = Upsample(blur_kernel)
357
-
358
- self.conv = ModulatedConv2d(
359
- in_channel, 3, 1, style_dim, demodulate=False)
360
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
361
-
362
- def forward(self, input, style, skip=None):
363
- out = self.conv(input, style)
364
- out = out + self.bias
365
-
366
- if skip is not None:
367
- skip = self.upsample(skip)
368
-
369
- out = out + skip
370
-
371
- return out
372
-
373
-
374
- class Generator(nn.Module):
375
- def __init__(
376
- self,
377
- size,
378
- style_dim,
379
- n_mlp,
380
- channel_multiplier=1,
381
- blur_kernel=[1, 3, 3, 1],
382
- lr_mlp=0.01,
383
- small=False,
384
- small_isaac=False,
385
- ):
386
- super().__init__()
387
-
388
- self.size = size
389
-
390
- if small and size > 64:
391
- raise ValueError("small only works for sizes <= 64")
392
-
393
- self.style_dim = style_dim
394
-
395
- layers = [PixelNorm()]
396
-
397
- for i in range(n_mlp):
398
- layers.append(
399
- EqualLinear(
400
- style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
401
- )
402
- )
403
-
404
- self.style = nn.Sequential(*layers)
405
-
406
- if small:
407
- self.channels = {
408
- 4: 64 * channel_multiplier,
409
- 8: 64 * channel_multiplier,
410
- 16: 64 * channel_multiplier,
411
- 32: 64 * channel_multiplier,
412
- 64: 64 * channel_multiplier,
413
- }
414
- elif small_isaac:
415
- self.channels = {4: 256, 8: 256,
416
- 16: 256, 32: 256, 64: 128, 128: 128}
417
- else:
418
- self.channels = {
419
- 4: 512,
420
- 8: 512,
421
- 16: 512,
422
- 32: 512,
423
- 64: 256 * channel_multiplier,
424
- 128: 128 * channel_multiplier,
425
- 256: 64 * channel_multiplier,
426
- 512: 32 * channel_multiplier,
427
- 1024: 16 * channel_multiplier,
428
- }
429
-
430
- self.input = ConstantInput(self.channels[4])
431
- self.conv1 = StyledConv(
432
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
433
- )
434
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
435
-
436
- self.log_size = int(math.log(size, 2))
437
- self.num_layers = (self.log_size - 2) * 2 + 1
438
-
439
- self.convs = nn.ModuleList()
440
- self.upsamples = nn.ModuleList()
441
- self.to_rgbs = nn.ModuleList()
442
- self.noises = nn.Module()
443
-
444
- in_channel = self.channels[4]
445
-
446
- for layer_idx in range(self.num_layers):
447
- res = (layer_idx + 5) // 2
448
- shape = [1, 1, 2 ** res, 2 ** res]
449
- self.noises.register_buffer(
450
- "noise_{}".format(layer_idx), torch.randn(*shape)
451
- )
452
-
453
- for i in range(3, self.log_size + 1):
454
- out_channel = self.channels[2 ** i]
455
-
456
- self.convs.append(
457
- StyledConv(
458
- in_channel,
459
- out_channel,
460
- 3,
461
- style_dim,
462
- upsample=True,
463
- blur_kernel=blur_kernel,
464
- )
465
- )
466
-
467
- self.convs.append(
468
- StyledConv(
469
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
470
- )
471
- )
472
-
473
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
474
-
475
- in_channel = out_channel
476
-
477
- self.n_latent = self.log_size * 2 - 2
478
-
479
- def make_noise(self):
480
- device = self.input.input.device
481
-
482
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
483
-
484
- for i in range(3, self.log_size + 1):
485
- for _ in range(2):
486
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
487
-
488
- return noises
489
-
490
- def mean_latent(self, n_latent):
491
- latent_in = torch.randn(
492
- n_latent, self.style_dim, device=self.input.input.device
493
- )
494
- latent = self.style(latent_in).mean(0, keepdim=True)
495
-
496
- return latent
497
-
498
- def get_latent(self, input):
499
- return self.style(input)
500
-
501
- def forward(
502
- self,
503
- styles,
504
- return_latents=False,
505
- return_features=False,
506
- inject_index=None,
507
- truncation=1,
508
- truncation_latent=None,
509
- input_is_latent=False,
510
- noise=None,
511
- randomize_noise=True,
512
- ):
513
- if not input_is_latent:
514
- # print("haha")
515
- styles = [self.style(s) for s in styles]
516
- if noise is None:
517
- if randomize_noise:
518
- noise = [None] * self.num_layers
519
- else:
520
- noise = [
521
- getattr(self.noises, "noise_{}".format(i))
522
- for i in range(self.num_layers)
523
- ]
524
-
525
- if truncation < 1:
526
- style_t = []
527
-
528
- for style in styles:
529
- style_t.append(
530
- truncation_latent + truncation *
531
- (style - truncation_latent)
532
- )
533
-
534
- styles = style_t
535
- # print(styles)
536
- if len(styles) < 2:
537
- inject_index = self.n_latent
538
-
539
- if styles[0].ndim < 3:
540
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
541
- # print("a")
542
- else:
543
- # print(len(styles))
544
- latent = styles[0]
545
- # print("b", latent.shape)
546
-
547
- else:
548
- # print("c")
549
- if inject_index is None:
550
- inject_index = 4
551
-
552
- latent = styles[0].unsqueeze(0)
553
- if latent.shape[1] == 1:
554
- latent = latent.repeat(1, inject_index, 1)
555
- else:
556
- latent = latent[:, :inject_index, :]
557
- latent2 = styles[1].unsqueeze(1).repeat(
558
- 1, self.n_latent - inject_index, 1)
559
-
560
- latent = torch.cat([latent, latent2], 1)
561
-
562
- features = {}
563
- out = self.input(latent)
564
- features["out_0"] = out
565
- out = self.conv1(out, latent[:, 0], noise=noise[0])
566
- features["conv1_0"] = out
567
-
568
- skip = self.to_rgb1(out, latent[:, 1])
569
- features["skip_0"] = skip
570
- i = 1
571
- for conv1, conv2, noise1, noise2, to_rgb in zip(
572
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
573
- ):
574
- out = conv1(out, latent[:, i], noise=noise1)
575
- features["conv1_{}".format(i)] = out
576
- out = conv2(out, latent[:, i + 1], noise=noise2)
577
- features["conv2_{}".format(i)] = out
578
- skip = to_rgb(out, latent[:, i + 2], skip)
579
- features["skip_{}".format(i)] = skip
580
-
581
- i += 2
582
-
583
- image = skip
584
-
585
- if return_latents:
586
- return image, latent
587
- elif return_features:
588
- return image, features
589
- else:
590
- return image, None
591
-
592
-
593
- class ConvLayer(nn.Sequential):
594
- def __init__(
595
- self,
596
- in_channel,
597
- out_channel,
598
- kernel_size,
599
- downsample=False,
600
- blur_kernel=[1, 3, 3, 1],
601
- bias=True,
602
- activate=True,
603
- ):
604
- layers = []
605
-
606
- if downsample:
607
- factor = 2
608
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
609
- pad0 = (p + 1) // 2
610
- pad1 = p // 2
611
-
612
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
613
-
614
- stride = 2
615
- self.padding = 0
616
-
617
- else:
618
- stride = 1
619
- self.padding = kernel_size // 2
620
-
621
- layers.append(
622
- EqualConv2d(
623
- in_channel,
624
- out_channel,
625
- kernel_size,
626
- padding=self.padding,
627
- stride=stride,
628
- bias=bias and not activate,
629
- )
630
- )
631
-
632
- if activate:
633
- if bias:
634
- layers.append(FusedLeakyReLU(out_channel))
635
-
636
- else:
637
- layers.append(ScaledLeakyReLU(0.2))
638
-
639
- super().__init__(*layers)
640
-
641
-
642
- class ResBlock(nn.Module):
643
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
644
- super().__init__()
645
-
646
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
647
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
648
-
649
- self.skip = ConvLayer(
650
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
651
- )
652
-
653
- def forward(self, input):
654
- out = self.conv1(input)
655
- out = self.conv2(out)
656
-
657
- skip = self.skip(input)
658
- out = (out + skip) / math.sqrt(2)
659
-
660
- return out
661
-
662
-
663
- class StyleDiscriminator(nn.Module):
664
- def __init__(
665
- self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], small=False
666
- ):
667
- super().__init__()
668
-
669
- if small:
670
- channels = {4: 64, 8: 64, 16: 64, 32: 64, 64: 64}
671
-
672
- else:
673
- channels = {
674
- 4: 512,
675
- 8: 512,
676
- 16: 512,
677
- 32: 512,
678
- 64: 256 * channel_multiplier,
679
- 128: 128 * channel_multiplier,
680
- 256: 64 * channel_multiplier,
681
- 512: 32 * channel_multiplier,
682
- 1024: 16 * channel_multiplier,
683
- }
684
-
685
- convs = [ConvLayer(3, channels[size], 1)]
686
-
687
- log_size = int(math.log(size, 2))
688
-
689
- in_channel = channels[size]
690
-
691
- for i in range(log_size, 2, -1):
692
- out_channel = channels[2 ** (i - 1)]
693
-
694
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
695
-
696
- in_channel = out_channel
697
-
698
- self.convs = nn.Sequential(*convs)
699
-
700
- self.stddev_group = 4
701
- self.stddev_feat = 1
702
-
703
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
704
- self.final_linear = nn.Sequential(
705
- EqualLinear(channels[4] * 4 * 4, channels[4],
706
- activation="fused_lrelu"),
707
- EqualLinear(channels[4], 1),
708
- )
709
-
710
- # def forward(self, input):
711
- # out = self.convs(input)
712
-
713
- # batch, channel, height, width = out.shape
714
- # group = min(batch, self.stddev_group)
715
- # stddev = out.view(
716
- # group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
717
- # )
718
- # stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
719
- # stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
720
- # stddev = stddev.repeat(group, 1, height, width)
721
- # out = torch.cat([out, stddev], 1)
722
-
723
- # out = self.final_conv(out)
724
-
725
- # out = out.view(batch, -1)
726
- # out = self.final_linear(out)
727
-
728
- # return out
729
-
730
- def forward(self, input):
731
- h = input
732
- h_list = []
733
-
734
- for index, blocklist in enumerate(self.convs):
735
- h = blocklist(h)
736
- h_list.append(h)
737
-
738
- out = h
739
- batch, channel, height, width = out.shape
740
- group = min(batch, self.stddev_group)
741
- stddev = out.view(
742
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
743
- )
744
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
745
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
746
- stddev = stddev.repeat(group, 1, height, width)
747
- out = torch.cat([out, stddev], 1)
748
-
749
- out = self.final_conv(out)
750
- h_list.append(out)
751
-
752
- out = out.view(batch, -1)
753
- out = self.final_linear(out)
754
-
755
- return out, h_list
756
-
757
-
758
- class StyleEncoder(nn.Module):
759
- def __init__(self, size, w_dim=512):
760
- super().__init__()
761
-
762
- channels = {
763
- 4: 512,
764
- 8: 512,
765
- 16: 512,
766
- 32: 512,
767
- 64: 256,
768
- 128: 128,
769
- 256: 64,
770
- 512: 32,
771
- 1024: 16
772
- }
773
-
774
- self.w_dim = w_dim
775
- log_size = int(math.log(size, 2))
776
-
777
- # self.n_latents = log_size*2 - 2
778
-
779
- convs = [ConvLayer(3, channels[size], 1)]
780
-
781
- in_channel = channels[size]
782
- for i in range(log_size, 2, -1):
783
- out_channel = channels[2 ** (i - 1)]
784
- convs.append(ResBlock(in_channel, out_channel))
785
- in_channel = out_channel
786
-
787
- # convs.append(EqualConv2d(in_channel, self.n_latents*self.w_dim, 4, padding=0, bias=False))
788
- convs.append(EqualConv2d(
789
- in_channel, 2*self.w_dim, 4, padding=0, bias=False))
790
-
791
- self.convs = nn.Sequential(*convs)
792
-
793
- def forward(self, input):
794
- out = self.convs(input)
795
- # return out.view(len(input), self.n_latents, self.w_dim)
796
- reshaped = out.view(len(input), 2*self.w_dim)
797
- return reshaped[:, :self.w_dim], reshaped[:, self.w_dim:]
798
-
799
-
800
- def kaiming_init(m):
801
- if isinstance(m, (nn.Linear, nn.Conv2d)):
802
- init.kaiming_normal_(m.weight)
803
- if m.bias is not None:
804
- m.bias.data.fill_(0)
805
- elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
806
- m.weight.data.fill_(1)
807
- if m.bias is not None:
808
- m.bias.data.fill_(0)
809
-
810
-
811
- def normal_init(m):
812
- if isinstance(m, (nn.Linear, nn.Conv2d)):
813
- init.normal_(m.weight, 0, 0.02)
814
- if m.bias is not None:
815
- m.bias.data.fill_(0)
816
- elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
817
- m.weight.data.fill_(1)
818
- if m.bias is not None:
819
- m.bias.data.fill_(0)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/paradigms.md DELETED
@@ -1,54 +0,0 @@
1
- <!--Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Parallel Sampling of Diffusion Models
14
-
15
- [Parallel Sampling of Diffusion Models](https://huggingface.co/papers/2305.16317) is by Andy Shih, Suneel Belkhale, Stefano Ermon, Dorsa Sadigh, Nima Anari.
16
-
17
- The abstract from the paper is:
18
-
19
- *Diffusion models are powerful generative models but suffer from slow sampling, often taking 1000 sequential denoising steps for one sample. As a result, considerable efforts have been directed toward reducing the number of denoising steps, but these methods hurt sample quality. Instead of reducing the number of denoising steps (trading quality for speed), in this paper we explore an orthogonal approach: can we run the denoising steps in parallel (trading compute for speed)? In spite of the sequential nature of the denoising steps, we show that surprisingly it is possible to parallelize sampling via Picard iterations, by guessing the solution of future denoising steps and iteratively refining until convergence. With this insight, we present ParaDiGMS, a novel method to accelerate the sampling of pretrained diffusion models by denoising multiple steps in parallel. ParaDiGMS is the first diffusion sampling method that enables trading compute for speed and is even compatible with existing fast sampling techniques such as DDIM and DPMSolver. Using ParaDiGMS, we improve sampling speed by 2-4x across a range of robotics and image generation models, giving state-of-the-art sampling speeds of 0.2s on 100-step DiffusionPolicy and 16s on 1000-step StableDiffusion-v2 with no measurable degradation of task reward, FID score, or CLIP score.*
20
-
21
- The original codebase can be found at [AndyShih12/paradigms](https://github.com/AndyShih12/paradigms), and the pipeline was contributed by [AndyShih12](https://github.com/AndyShih12). ❤️
22
-
23
- ## Tips
24
-
25
- This pipeline improves sampling speed by running denoising steps in parallel, at the cost of increased total FLOPs.
26
- Therefore, it is better to call this pipeline when running on multiple GPUs. Otherwise, without enough GPU bandwidth,
27
- sampling may be even slower than sequential sampling.
28
-
29
- The two parameters to play with are `parallel` (batch size) and `tolerance`.
30
- - If it fits in memory, for a 1000-step DDPM you can aim for a batch size of around 100
31
- (for example, 8 GPUs and `batch_per_device=12` to get `parallel=96`). A higher batch size
32
- may not fit in memory, and lower batch size gives less parallelism.
33
- - For tolerance, using a higher tolerance may get better speedups but can risk sample quality degradation.
34
- If there is quality degradation with the default tolerance, then use a lower tolerance like `0.001`.
35
-
36
- For a 1000-step DDPM on 8 A100 GPUs, you can expect around a 3x speedup from [`StableDiffusionParadigmsPipeline`] compared to the [`StableDiffusionPipeline`]
37
- by setting `parallel=80` and `tolerance=0.1`.
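
Below is a minimal usage sketch of the call pattern described above. It is not part of the original example set: the checkpoint id, the choice of `DDPMParallelScheduler`, and the exact values passed to `parallel` and `tolerance` are illustrative assumptions.

```py
# Hedged sketch: the model id and scheduler choice are assumptions, not the
# officially documented example.
import torch
from diffusers import DDPMParallelScheduler, StableDiffusionParadigmsPipeline

scheduler = DDPMParallelScheduler.from_pretrained(
    "stabilityai/stable-diffusion-2", subfolder="scheduler"
)
pipe = StableDiffusionParadigmsPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

# `parallel` is the batch of denoising steps processed per Picard iteration;
# `tolerance` controls when that iteration is considered converged.
image = pipe(
    "a photo of a dog",
    num_inference_steps=1000,
    parallel=80,
    tolerance=0.1,
).images[0]
```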
38
-
39
- 🤗 Diffusers offers [distributed inference support](../training/distributed_inference) for generating multiple prompts
40
- in parallel on multiple GPUs. But [`StableDiffusionParadigmsPipeline`] is designed for speeding up sampling of a single prompt by using multiple GPUs.
41
-
42
- <Tip>
43
-
44
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
45
-
46
- </Tip>
47
-
48
- ## StableDiffusionParadigmsPipeline
49
- [[autodoc]] StableDiffusionParadigmsPipeline
50
- - __call__
51
- - all
52
-
53
- ## StableDiffusionPipelineOutput
54
- [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md DELETED
@@ -1,56 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Load community pipelines
14
-
15
- [[open-in-colab]]
16
-
17
- Community pipelines are any [`DiffusionPipeline`] classes that differ from the original implementation as specified in their paper (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the [Text-to-Image Generation with ControlNet Conditioning](https://arxiv.org/abs/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline.
18
-
19
- There are many cool community pipelines like [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) or [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community).
20
-
21
- To load any community pipeline on the Hub, pass the repository id of the community pipeline to the `custom_pipeline` argument and the model repository where you'd like to load the pipeline weights and components from. For example, the example below loads a dummy pipeline from [`hf-internal-testing/diffusers-dummy-pipeline`](https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py) and the pipeline weights and components from [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32):
22
-
23
- <Tip warning={true}>
24
-
25
- 🔒 By loading a community pipeline from the Hugging Face Hub, you are trusting that the code you are loading is safe. Make sure to inspect the code online before loading and running it automatically!
26
-
27
- </Tip>
28
-
29
- ```py
30
- from diffusers import DiffusionPipeline
31
-
32
- pipeline = DiffusionPipeline.from_pretrained(
33
- "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
34
- )
35
- ```
36
-
37
- Loading an official community pipeline is similar, but you can mix loading weights from an official repository id and pass pipeline components directly. The example below loads the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline, and you can pass the CLIP model components directly to it:
38
-
39
- ```py
40
- from diffusers import DiffusionPipeline
41
- from transformers import CLIPImageProcessor, CLIPModel
42
-
43
- clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
44
-
45
- feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
46
- clip_model = CLIPModel.from_pretrained(clip_model_id)
47
-
48
- pipeline = DiffusionPipeline.from_pretrained(
49
- "runwayml/stable-diffusion-v1-5",
50
- custom_pipeline="clip_guided_stable_diffusion",
51
- clip_model=clip_model,
52
- feature_extractor=feature_extractor,
53
- )
54
- ```
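
Once loaded, the community pipeline is called like any other `DiffusionPipeline`. The short sketch below is illustrative only: the prompt is a placeholder, and any pipeline-specific arguments (for example a CLIP guidance scale) depend on the community pipeline's own `__call__` signature, so check its README before relying on them.

```py
# Hedged usage sketch: the prompt is a placeholder and extra keyword arguments
# are intentionally omitted because they vary per community pipeline.
pipeline = pipeline.to("cuda")
image = pipeline(prompt="a fantasy landscape, detailed oil painting").images[0]
image.save("clip_guided_sd.png")
```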
55
-
56
- For more information about community pipelines, take a look at the [Community pipelines](custom_pipeline_examples) guide for how to use them and if you're interested in adding a community pipeline check out the [How to contribute a community pipeline](contribute_pipeline) guide!
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/reinforcement_learning/run_diffuser_locomotion.py DELETED
@@ -1,59 +0,0 @@
1
- import d4rl # noqa
2
- import gym
3
- import tqdm
4
- from diffusers.experimental import ValueGuidedRLPipeline
5
-
6
-
7
- config = {
8
- "n_samples": 64,
9
- "horizon": 32,
10
- "num_inference_steps": 20,
11
- "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
12
- "scale_grad_by_std": True,
13
- "scale": 0.1,
14
- "eta": 0.0,
15
- "t_grad_cutoff": 2,
16
- "device": "cpu",
17
- }
18
-
19
-
20
- if __name__ == "__main__":
21
- env_name = "hopper-medium-v2"
22
- env = gym.make(env_name)
23
-
24
- pipeline = ValueGuidedRLPipeline.from_pretrained(
25
- "bglick13/hopper-medium-v2-value-function-hor32",
26
- env=env,
27
- )
28
-
29
- env.seed(0)
30
- obs = env.reset()
31
- total_reward = 0
32
- total_score = 0
33
- T = 1000
34
- rollout = [obs.copy()]
35
- try:
36
- for t in tqdm.tqdm(range(T)):
37
- # call the policy
38
- denorm_actions = pipeline(obs, planning_horizon=32)
39
-
40
- # execute action in environment
41
- next_observation, reward, terminal, _ = env.step(denorm_actions)
42
- score = env.get_normalized_score(total_reward)
43
-
44
- # update return
45
- total_reward += reward
46
- total_score += score
47
- print(
48
- f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
49
- f" {total_score}"
50
- )
51
-
52
- # save observations for rendering
53
- rollout.append(next_observation.copy())
54
-
55
- obs = next_observation
56
- except KeyboardInterrupt:
57
- pass
58
-
59
- print(f"Total reward: {total_reward}")
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py DELETED
@@ -1,935 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import os
17
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
-
19
- import torch
20
- from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
21
-
22
- from ...image_processor import VaeImageProcessor
23
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
24
- from ...models import AutoencoderKL, UNet2DConditionModel
25
- from ...models.attention_processor import (
26
- AttnProcessor2_0,
27
- LoRAAttnProcessor2_0,
28
- LoRAXFormersAttnProcessor,
29
- XFormersAttnProcessor,
30
- )
31
- from ...schedulers import KarrasDiffusionSchedulers
32
- from ...utils import (
33
- is_accelerate_available,
34
- is_accelerate_version,
35
- is_invisible_watermark_available,
36
- logging,
37
- randn_tensor,
38
- replace_example_docstring,
39
- )
40
- from ..pipeline_utils import DiffusionPipeline
41
- from . import StableDiffusionXLPipelineOutput
42
-
43
-
44
- if is_invisible_watermark_available():
45
- from .watermark import StableDiffusionXLWatermarker
46
-
47
-
48
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
-
50
- EXAMPLE_DOC_STRING = """
51
- Examples:
52
- ```py
53
- >>> import torch
54
- >>> from diffusers import StableDiffusionXLPipeline
55
-
56
- >>> pipe = StableDiffusionXLPipeline.from_pretrained(
57
- ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
58
- ... )
59
- >>> pipe = pipe.to("cuda")
60
-
61
- >>> prompt = "a photo of an astronaut riding a horse on mars"
62
- >>> image = pipe(prompt).images[0]
63
- ```
64
- """
65
-
66
-
67
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
68
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
69
- """
70
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
71
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
72
- """
73
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
74
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
75
- # rescale the results from guidance (fixes overexposure)
76
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
77
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
78
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
79
- return noise_cfg
80
-
81
-
82
- class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
83
- r"""
84
- Pipeline for text-to-image generation using Stable Diffusion XL.
85
-
86
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
87
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
88
-
89
- In addition the pipeline inherits the following loading methods:
90
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
91
- - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
92
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
93
-
94
- as well as the following saving methods:
95
- - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
96
-
97
- Args:
98
- vae ([`AutoencoderKL`]):
99
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
100
- text_encoder ([`CLIPTextModel`]):
101
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
102
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
103
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
104
- text_encoder_2 ([`CLIPTextModelWithProjection`]):
105
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
106
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
107
- specifically the
108
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
109
- variant.
110
- tokenizer (`CLIPTokenizer`):
111
- Tokenizer of class
112
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
113
- tokenizer_2 (`CLIPTokenizer`):
114
- Second Tokenizer of class
115
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
116
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
117
- scheduler ([`SchedulerMixin`]):
118
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
119
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
120
- """
121
-
122
- def __init__(
123
- self,
124
- vae: AutoencoderKL,
125
- text_encoder: CLIPTextModel,
126
- text_encoder_2: CLIPTextModelWithProjection,
127
- tokenizer: CLIPTokenizer,
128
- tokenizer_2: CLIPTokenizer,
129
- unet: UNet2DConditionModel,
130
- scheduler: KarrasDiffusionSchedulers,
131
- force_zeros_for_empty_prompt: bool = True,
132
- add_watermarker: Optional[bool] = None,
133
- ):
134
- super().__init__()
135
-
136
- self.register_modules(
137
- vae=vae,
138
- text_encoder=text_encoder,
139
- text_encoder_2=text_encoder_2,
140
- tokenizer=tokenizer,
141
- tokenizer_2=tokenizer_2,
142
- unet=unet,
143
- scheduler=scheduler,
144
- )
145
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
146
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
147
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
148
- self.default_sample_size = self.unet.config.sample_size
149
-
150
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
151
-
152
- if add_watermarker:
153
- self.watermark = StableDiffusionXLWatermarker()
154
- else:
155
- self.watermark = None
156
-
157
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
158
- def enable_vae_slicing(self):
159
- r"""
160
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
161
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
162
- """
163
- self.vae.enable_slicing()
164
-
165
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
166
- def disable_vae_slicing(self):
167
- r"""
168
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
169
- computing decoding in one step.
170
- """
171
- self.vae.disable_slicing()
172
-
173
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
174
- def enable_vae_tiling(self):
175
- r"""
176
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
177
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
178
- processing larger images.
179
- """
180
- self.vae.enable_tiling()
181
-
182
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
183
- def disable_vae_tiling(self):
184
- r"""
185
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
186
- computing decoding in one step.
187
- """
188
- self.vae.disable_tiling()
189
-
190
- def enable_model_cpu_offload(self, gpu_id=0):
191
- r"""
192
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
193
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
194
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
195
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
196
- """
197
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
198
- from accelerate import cpu_offload_with_hook
199
- else:
200
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
201
-
202
- device = torch.device(f"cuda:{gpu_id}")
203
-
204
- if self.device.type != "cpu":
205
- self.to("cpu", silence_dtype_warnings=True)
206
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
207
-
208
- model_sequence = (
209
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
210
- )
211
- model_sequence.extend([self.unet, self.vae])
212
-
213
- hook = None
214
- for cpu_offloaded_model in model_sequence:
215
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
216
-
217
- # We'll offload the last model manually.
218
- self.final_offload_hook = hook
219
-
220
- def encode_prompt(
221
- self,
222
- prompt: str,
223
- prompt_2: Optional[str] = None,
224
- device: Optional[torch.device] = None,
225
- num_images_per_prompt: int = 1,
226
- do_classifier_free_guidance: bool = True,
227
- negative_prompt: Optional[str] = None,
228
- negative_prompt_2: Optional[str] = None,
229
- prompt_embeds: Optional[torch.FloatTensor] = None,
230
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
231
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
232
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
233
- lora_scale: Optional[float] = None,
234
- ):
235
- r"""
236
- Encodes the prompt into text encoder hidden states.
237
-
238
- Args:
239
- prompt (`str` or `List[str]`, *optional*):
240
- prompt to be encoded
241
- prompt_2 (`str` or `List[str]`, *optional*):
242
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
243
- used in both text-encoders
244
- device: (`torch.device`):
245
- torch device
246
- num_images_per_prompt (`int`):
247
- number of images that should be generated per prompt
248
- do_classifier_free_guidance (`bool`):
249
- whether to use classifier free guidance or not
250
- negative_prompt (`str` or `List[str]`, *optional*):
251
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
252
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
253
- less than `1`).
254
- negative_prompt_2 (`str` or `List[str]`, *optional*):
255
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
256
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
257
- prompt_embeds (`torch.FloatTensor`, *optional*):
258
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
259
- provided, text embeddings will be generated from `prompt` input argument.
260
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
261
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
262
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
263
- argument.
264
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
265
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
266
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
267
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
268
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
269
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
270
- input argument.
271
- lora_scale (`float`, *optional*):
272
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
273
- """
274
- device = device or self._execution_device
275
-
276
- # set lora scale so that monkey patched LoRA
277
- # function of text encoder can correctly access it
278
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
279
- self._lora_scale = lora_scale
280
-
281
- if prompt is not None and isinstance(prompt, str):
282
- batch_size = 1
283
- elif prompt is not None and isinstance(prompt, list):
284
- batch_size = len(prompt)
285
- else:
286
- batch_size = prompt_embeds.shape[0]
287
-
288
- # Define tokenizers and text encoders
289
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
290
- text_encoders = (
291
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
292
- )
293
-
294
- if prompt_embeds is None:
295
- prompt_2 = prompt_2 or prompt
296
- # textual inversion: process multi-vector tokens if necessary
297
- prompt_embeds_list = []
298
- prompts = [prompt, prompt_2]
299
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
300
- if isinstance(self, TextualInversionLoaderMixin):
301
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
302
-
303
- text_inputs = tokenizer(
304
- prompt,
305
- padding="max_length",
306
- max_length=tokenizer.model_max_length,
307
- truncation=True,
308
- return_tensors="pt",
309
- )
310
-
311
- text_input_ids = text_inputs.input_ids
312
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
314
-
315
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
316
- text_input_ids, untruncated_ids
317
- ):
318
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
319
- logger.warning(
320
- "The following part of your input was truncated because CLIP can only handle sequences up to"
321
- f" {tokenizer.model_max_length} tokens: {removed_text}"
322
- )
323
-
324
- prompt_embeds = text_encoder(
325
- text_input_ids.to(device),
326
- output_hidden_states=True,
327
- )
328
-
329
- # We are only ALWAYS interested in the pooled output of the final text encoder
330
- pooled_prompt_embeds = prompt_embeds[0]
331
- prompt_embeds = prompt_embeds.hidden_states[-2]
332
-
333
- prompt_embeds_list.append(prompt_embeds)
334
-
335
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
336
-
337
- # get unconditional embeddings for classifier free guidance
338
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
339
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
340
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
341
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
342
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
343
- negative_prompt = negative_prompt or ""
344
- negative_prompt_2 = negative_prompt_2 or negative_prompt
345
-
346
- uncond_tokens: List[str]
347
- if prompt is not None and type(prompt) is not type(negative_prompt):
348
- raise TypeError(
349
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
350
- f" {type(prompt)}."
351
- )
352
- elif isinstance(negative_prompt, str):
353
- uncond_tokens = [negative_prompt, negative_prompt_2]
354
- elif batch_size != len(negative_prompt):
355
- raise ValueError(
356
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
357
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
358
- " the batch size of `prompt`."
359
- )
360
- else:
361
- uncond_tokens = [negative_prompt, negative_prompt_2]
362
-
363
- negative_prompt_embeds_list = []
364
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
365
- if isinstance(self, TextualInversionLoaderMixin):
366
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
367
-
368
- max_length = prompt_embeds.shape[1]
369
- uncond_input = tokenizer(
370
- negative_prompt,
371
- padding="max_length",
372
- max_length=max_length,
373
- truncation=True,
374
- return_tensors="pt",
375
- )
376
-
377
- negative_prompt_embeds = text_encoder(
378
- uncond_input.input_ids.to(device),
379
- output_hidden_states=True,
380
- )
381
- # We are only ALWAYS interested in the pooled output of the final text encoder
382
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
383
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
384
-
385
- negative_prompt_embeds_list.append(negative_prompt_embeds)
386
-
387
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
388
-
389
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
390
- bs_embed, seq_len, _ = prompt_embeds.shape
391
- # duplicate text embeddings for each generation per prompt, using mps friendly method
392
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
393
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
394
-
395
- if do_classifier_free_guidance:
396
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
397
- seq_len = negative_prompt_embeds.shape[1]
398
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
399
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
400
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
401
-
402
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
403
- bs_embed * num_images_per_prompt, -1
404
- )
405
- if do_classifier_free_guidance:
406
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
407
- bs_embed * num_images_per_prompt, -1
408
- )
409
-
410
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
411
-
412
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
413
- def prepare_extra_step_kwargs(self, generator, eta):
414
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
415
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
416
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
417
- # and should be between [0, 1]
418
-
419
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
420
- extra_step_kwargs = {}
421
- if accepts_eta:
422
- extra_step_kwargs["eta"] = eta
423
-
424
- # check if the scheduler accepts generator
425
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
426
- if accepts_generator:
427
- extra_step_kwargs["generator"] = generator
428
- return extra_step_kwargs
429
-
430
- def check_inputs(
431
- self,
432
- prompt,
433
- prompt_2,
434
- height,
435
- width,
436
- callback_steps,
437
- negative_prompt=None,
438
- negative_prompt_2=None,
439
- prompt_embeds=None,
440
- negative_prompt_embeds=None,
441
- pooled_prompt_embeds=None,
442
- negative_pooled_prompt_embeds=None,
443
- ):
444
- if height % 8 != 0 or width % 8 != 0:
445
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
446
-
447
- if (callback_steps is None) or (
448
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
449
- ):
450
- raise ValueError(
451
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
452
- f" {type(callback_steps)}."
453
- )
454
-
455
- if prompt is not None and prompt_embeds is not None:
456
- raise ValueError(
457
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
458
- " only forward one of the two."
459
- )
460
- elif prompt_2 is not None and prompt_embeds is not None:
461
- raise ValueError(
462
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
463
- " only forward one of the two."
464
- )
465
- elif prompt is None and prompt_embeds is None:
466
- raise ValueError(
467
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
468
- )
469
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
470
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
471
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
472
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
473
-
474
- if negative_prompt is not None and negative_prompt_embeds is not None:
475
- raise ValueError(
476
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
477
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
478
- )
479
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
480
- raise ValueError(
481
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
482
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
483
- )
484
-
485
- if prompt_embeds is not None and negative_prompt_embeds is not None:
486
- if prompt_embeds.shape != negative_prompt_embeds.shape:
487
- raise ValueError(
488
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
489
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
490
- f" {negative_prompt_embeds.shape}."
491
- )
492
-
493
- if prompt_embeds is not None and pooled_prompt_embeds is None:
494
- raise ValueError(
495
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
496
- )
497
-
498
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
499
- raise ValueError(
500
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
501
- )
502
-
503
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
504
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
505
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
506
- if isinstance(generator, list) and len(generator) != batch_size:
507
- raise ValueError(
508
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
509
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
510
- )
511
-
512
- if latents is None:
513
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
514
- else:
515
- latents = latents.to(device)
516
-
517
- # scale the initial noise by the standard deviation required by the scheduler
518
- latents = latents * self.scheduler.init_noise_sigma
519
- return latents
520
-
521
- def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
522
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
523
-
524
- passed_add_embed_dim = (
525
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
526
- )
527
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
528
-
529
- if expected_add_embed_dim != passed_add_embed_dim:
530
- raise ValueError(
531
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
532
- )
533
-
534
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
535
- return add_time_ids
536
-
537
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
538
- def upcast_vae(self):
539
- dtype = self.vae.dtype
540
- self.vae.to(dtype=torch.float32)
541
- use_torch_2_0_or_xformers = isinstance(
542
- self.vae.decoder.mid_block.attentions[0].processor,
543
- (
544
- AttnProcessor2_0,
545
- XFormersAttnProcessor,
546
- LoRAXFormersAttnProcessor,
547
- LoRAAttnProcessor2_0,
548
- ),
549
- )
550
- # if xformers or torch_2_0 is used attention block does not need
551
- # to be in float32 which can save lots of memory
552
- if use_torch_2_0_or_xformers:
553
- self.vae.post_quant_conv.to(dtype)
554
- self.vae.decoder.conv_in.to(dtype)
555
- self.vae.decoder.mid_block.to(dtype)
556
-
557
- @torch.no_grad()
558
- @replace_example_docstring(EXAMPLE_DOC_STRING)
559
- def __call__(
560
- self,
561
- prompt: Union[str, List[str]] = None,
562
- prompt_2: Optional[Union[str, List[str]]] = None,
563
- height: Optional[int] = None,
564
- width: Optional[int] = None,
565
- num_inference_steps: int = 50,
566
- denoising_end: Optional[float] = None,
567
- guidance_scale: float = 5.0,
568
- negative_prompt: Optional[Union[str, List[str]]] = None,
569
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
570
- num_images_per_prompt: Optional[int] = 1,
571
- eta: float = 0.0,
572
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
573
- latents: Optional[torch.FloatTensor] = None,
574
- prompt_embeds: Optional[torch.FloatTensor] = None,
575
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
576
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
577
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
578
- output_type: Optional[str] = "pil",
579
- return_dict: bool = True,
580
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
581
- callback_steps: int = 1,
582
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
583
- guidance_rescale: float = 0.0,
584
- original_size: Optional[Tuple[int, int]] = None,
585
- crops_coords_top_left: Tuple[int, int] = (0, 0),
586
- target_size: Optional[Tuple[int, int]] = None,
587
- ):
588
- r"""
589
- Function invoked when calling the pipeline for generation.
590
-
591
- Args:
592
- prompt (`str` or `List[str]`, *optional*):
593
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
594
- instead.
595
- prompt_2 (`str` or `List[str]`, *optional*):
596
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
597
- used in both text-encoders
598
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
599
- The height in pixels of the generated image.
600
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
601
- The width in pixels of the generated image.
602
- num_inference_steps (`int`, *optional*, defaults to 50):
603
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
604
- expense of slower inference.
605
- denoising_end (`float`, *optional*):
606
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
607
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
608
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
609
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
610
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
611
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
612
- guidance_scale (`float`, *optional*, defaults to 5.0):
613
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
614
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
615
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
616
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
617
- usually at the expense of lower image quality.
618
- negative_prompt (`str` or `List[str]`, *optional*):
619
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
620
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
621
- less than `1`).
622
- negative_prompt_2 (`str` or `List[str]`, *optional*):
623
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
624
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
625
- num_images_per_prompt (`int`, *optional*, defaults to 1):
626
- The number of images to generate per prompt.
627
- eta (`float`, *optional*, defaults to 0.0):
628
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
629
- [`schedulers.DDIMScheduler`], will be ignored for others.
630
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
631
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
632
- to make generation deterministic.
633
- latents (`torch.FloatTensor`, *optional*):
634
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
635
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
636
- tensor will be generated by sampling using the supplied random `generator`.
637
- prompt_embeds (`torch.FloatTensor`, *optional*):
638
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
639
- provided, text embeddings will be generated from `prompt` input argument.
640
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
641
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
642
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
643
- argument.
644
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
645
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
646
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
647
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
648
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
649
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
650
- input argument.
651
- output_type (`str`, *optional*, defaults to `"pil"`):
652
- The output format of the generated image. Choose between
653
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
654
- return_dict (`bool`, *optional*, defaults to `True`):
655
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
656
- of a plain tuple.
657
- callback (`Callable`, *optional*):
658
- A function that will be called every `callback_steps` steps during inference. The function will be
659
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
660
- callback_steps (`int`, *optional*, defaults to 1):
661
- The frequency at which the `callback` function will be called. If not specified, the callback will be
662
- called at every step.
663
- cross_attention_kwargs (`dict`, *optional*):
664
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
665
- `self.processor` in
666
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
667
- guidance_rescale (`float`, *optional*, defaults to 0.7):
668
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
669
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
670
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
671
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
672
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
673
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
674
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
675
- explained in section 2.2 of
676
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
677
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
678
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
679
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
680
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
681
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
682
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
683
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
684
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
685
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
686
-
687
- Examples:
688
-
689
- Returns:
690
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
691
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
692
- `tuple`. When returning a tuple, the first element is a list with the generated images.
693
- """
694
- # 0. Default height and width to unet
695
- height = height or self.default_sample_size * self.vae_scale_factor
696
- width = width or self.default_sample_size * self.vae_scale_factor
697
-
698
- original_size = original_size or (height, width)
699
- target_size = target_size or (height, width)
700
-
701
- # 1. Check inputs. Raise error if not correct
702
- self.check_inputs(
703
- prompt,
704
- prompt_2,
705
- height,
706
- width,
707
- callback_steps,
708
- negative_prompt,
709
- negative_prompt_2,
710
- prompt_embeds,
711
- negative_prompt_embeds,
712
- pooled_prompt_embeds,
713
- negative_pooled_prompt_embeds,
714
- )
715
-
716
- # 2. Define call parameters
717
- if prompt is not None and isinstance(prompt, str):
718
- batch_size = 1
719
- elif prompt is not None and isinstance(prompt, list):
720
- batch_size = len(prompt)
721
- else:
722
- batch_size = prompt_embeds.shape[0]
723
-
724
- device = self._execution_device
725
-
726
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
727
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
728
- # corresponds to doing no classifier free guidance.
729
- do_classifier_free_guidance = guidance_scale > 1.0
730
-
731
- # 3. Encode input prompt
732
- text_encoder_lora_scale = (
733
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
734
- )
735
- (
736
- prompt_embeds,
737
- negative_prompt_embeds,
738
- pooled_prompt_embeds,
739
- negative_pooled_prompt_embeds,
740
- ) = self.encode_prompt(
741
- prompt=prompt,
742
- prompt_2=prompt_2,
743
- device=device,
744
- num_images_per_prompt=num_images_per_prompt,
745
- do_classifier_free_guidance=do_classifier_free_guidance,
746
- negative_prompt=negative_prompt,
747
- negative_prompt_2=negative_prompt_2,
748
- prompt_embeds=prompt_embeds,
749
- negative_prompt_embeds=negative_prompt_embeds,
750
- pooled_prompt_embeds=pooled_prompt_embeds,
751
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
752
- lora_scale=text_encoder_lora_scale,
753
- )
754
-
755
- # 4. Prepare timesteps
756
- self.scheduler.set_timesteps(num_inference_steps, device=device)
757
-
758
- timesteps = self.scheduler.timesteps
759
-
760
- # 5. Prepare latent variables
761
- num_channels_latents = self.unet.config.in_channels
762
- latents = self.prepare_latents(
763
- batch_size * num_images_per_prompt,
764
- num_channels_latents,
765
- height,
766
- width,
767
- prompt_embeds.dtype,
768
- device,
769
- generator,
770
- latents,
771
- )
772
-
773
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
774
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
775
-
776
- # 7. Prepare added time ids & embeddings
777
- add_text_embeds = pooled_prompt_embeds
778
- add_time_ids = self._get_add_time_ids(
779
- original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
780
- )
781
-
782
- if do_classifier_free_guidance:
783
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
784
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
785
- add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
786
-
787
- prompt_embeds = prompt_embeds.to(device)
788
- add_text_embeds = add_text_embeds.to(device)
789
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
790
-
791
- # 8. Denoising loop
792
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
793
-
794
- # 8.1 Apply denoising_end
795
- if denoising_end is not None and isinstance(denoising_end, float) and 0 < denoising_end < 1:
796
- discrete_timestep_cutoff = int(
797
- round(
798
- self.scheduler.config.num_train_timesteps
799
- - (denoising_end * self.scheduler.config.num_train_timesteps)
800
- )
801
- )
802
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
803
- timesteps = timesteps[:num_inference_steps]
804
-
805
- with self.progress_bar(total=num_inference_steps) as progress_bar:
806
- for i, t in enumerate(timesteps):
807
- # expand the latents if we are doing classifier free guidance
808
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
809
-
810
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
811
-
812
- # predict the noise residual
813
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
814
- noise_pred = self.unet(
815
- latent_model_input,
816
- t,
817
- encoder_hidden_states=prompt_embeds,
818
- cross_attention_kwargs=cross_attention_kwargs,
819
- added_cond_kwargs=added_cond_kwargs,
820
- return_dict=False,
821
- )[0]
822
-
823
- # perform guidance
824
- if do_classifier_free_guidance:
825
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
826
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
827
-
828
- if do_classifier_free_guidance and guidance_rescale > 0.0:
829
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
830
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
831
-
832
- # compute the previous noisy sample x_t -> x_t-1
833
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
834
-
835
- # call the callback, if provided
836
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
837
- progress_bar.update()
838
- if callback is not None and i % callback_steps == 0:
839
- callback(i, t, latents)
840
-
841
- # make sure the VAE is in float32 mode, as it overflows in float16
842
- if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
843
- self.upcast_vae()
844
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
845
-
846
- if not output_type == "latent":
847
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
848
- else:
849
- image = latents
850
- return StableDiffusionXLPipelineOutput(images=image)
851
-
852
- # apply watermark if available
853
- if self.watermark is not None:
854
- image = self.watermark.apply_watermark(image)
855
-
856
- image = self.image_processor.postprocess(image, output_type=output_type)
857
-
858
- # Offload last model to CPU
859
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
860
- self.final_offload_hook.offload()
861
-
862
- if not return_dict:
863
- return (image,)
864
-
865
- return StableDiffusionXLPipelineOutput(images=image)
866
-
867
- # Override to properly handle the loading and unloading of the additional text encoder.
868
- def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
869
- # We could have accessed the unet config from `lora_state_dict()` too. We pass
870
- # it here explicitly to be able to tell that it's coming from an SDXL
871
- # pipeline.
872
- state_dict, network_alphas = self.lora_state_dict(
873
- pretrained_model_name_or_path_or_dict,
874
- unet_config=self.unet.config,
875
- **kwargs,
876
- )
877
- self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
878
-
879
- text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
880
- if len(text_encoder_state_dict) > 0:
881
- self.load_lora_into_text_encoder(
882
- text_encoder_state_dict,
883
- network_alphas=network_alphas,
884
- text_encoder=self.text_encoder,
885
- prefix="text_encoder",
886
- lora_scale=self.lora_scale,
887
- )
888
-
889
- text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
890
- if len(text_encoder_2_state_dict) > 0:
891
- self.load_lora_into_text_encoder(
892
- text_encoder_2_state_dict,
893
- network_alphas=network_alphas,
894
- text_encoder=self.text_encoder_2,
895
- prefix="text_encoder_2",
896
- lora_scale=self.lora_scale,
897
- )
898
-
899
- @classmethod
900
- def save_lora_weights(
901
- self,
902
- save_directory: Union[str, os.PathLike],
903
- unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
904
- text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
905
- text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
906
- is_main_process: bool = True,
907
- weight_name: str = None,
908
- save_function: Callable = None,
909
- safe_serialization: bool = False,
910
- ):
911
- state_dict = {}
912
-
913
- def pack_weights(layers, prefix):
914
- layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
915
- layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
916
- return layers_state_dict
917
-
918
- state_dict.update(pack_weights(unet_lora_layers, "unet"))
919
-
920
- if text_encoder_lora_layers and text_encoder_2_lora_layers:
921
- state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
922
- state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
923
-
924
- self.write_lora_layers(
925
- state_dict=state_dict,
926
- save_directory=save_directory,
927
- is_main_process=is_main_process,
928
- weight_name=weight_name,
929
- save_function=save_function,
930
- safe_serialization=safe_serialization,
931
- )
932
-
933
- def _remove_text_encoder_monkey_patch(self):
934
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
935
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
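
The `denoising_end` behaviour documented above is easiest to see in a two-stage setup. Below is a minimal usage sketch of how a pipeline with this signature is typically driven as the base model of a "Mixture of Denoisers" workflow; the stock `stabilityai` checkpoints and the companion img2img refiner pipeline are taken from the public diffusers API and are assumptions, not part of this file.

```python
# Sketch only: assumes the standard diffusers SDXL pipelines and checkpoints.
import torch
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

prompt = "a majestic lion jumping from a big stone at night"

# Stop the base model at 80% of the noise schedule and return still-noisy latents ...
latents = base(
    prompt=prompt,
    num_inference_steps=40,
    denoising_end=0.8,
    output_type="latent",
).images

# ... then let the refiner pick up the remaining 20% of the schedule.
image = refiner(
    prompt=prompt,
    image=latents,
    num_inference_steps=40,
    denoising_start=0.8,
).images[0]
image.save("lion.png")
```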
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_dependencies.py DELETED
@@ -1,39 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import unittest
17
-
18
-
19
- class DependencyTester(unittest.TestCase):
20
- def test_diffusers_import(self):
21
- try:
22
- import diffusers # noqa: F401
23
- except ImportError:
24
- assert False
25
-
26
- def test_backend_registration(self):
27
- import diffusers
28
- from diffusers.dependency_versions_table import deps
29
-
30
- all_classes = inspect.getmembers(diffusers, inspect.isclass)
31
-
32
- for cls_name, cls_module in all_classes:
33
- if "dummy_" in cls_module.__module__:
34
- for backend in cls_module._backends:
35
- if backend == "k_diffusion":
36
- backend = "k-diffusion"
37
- elif backend == "invisible_watermark":
38
- backend = "invisible-watermark"
39
- assert backend in deps, f"{backend} is not in the deps table!"
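
For context, the `_backends` attribute checked above comes from the dummy placeholder classes that diffusers generates when an optional dependency is missing. A rough, self-contained sketch of that pattern follows; the class names here are illustrative, not the actual diffusers internals.

```python
# Illustrative stand-in for the dummy-object pattern the test relies on.
class DummyObject(type):
    """Metaclass for placeholders of classes whose optional backend is absent."""
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class FlaxFakeModel(metaclass=DummyObject):   # hypothetical dummy class
    _backends = ["flax"]

deps = {"flax": "flax>=0.4.1"}                # toy stand-in for the pinned deps table
for backend in FlaxFakeModel._backends:
    assert backend in deps, f"{backend} is not in the deps table!"
```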
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://resnext101_32x4d',
4
- backbone=dict(
5
- type='ResNeXt',
6
- depth=101,
7
- groups=32,
8
- base_width=4,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- frozen_stages=1,
12
- norm_cfg=dict(type='BN', requires_grad=True),
13
- style='pytorch'))
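
A brief usage sketch of how a config like this is normally consumed with the mmcv/mmdet APIs of that generation; the checkpoint and image paths are placeholders.

```python
# Sketch: load the config (resolving `_base_` inheritance) and build a detector.
from mmcv import Config
from mmdet.apis import init_detector, inference_detector

cfg = Config.fromfile("configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py")
print(cfg.model.backbone.type)        # 'ResNeXt', overriding the r50 base config

model = init_detector(cfg, checkpoint=None, device="cuda:0")   # checkpoint path omitted
result = inference_detector(model, "demo.jpg")                 # placeholder image
```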
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './danet_r50-d8_512x512_20k_voc12aug.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/shared.py DELETED
@@ -1,275 +0,0 @@
1
- import argparse
2
- import sys
3
- from collections import OrderedDict
4
- from pathlib import Path
5
-
6
- import yaml
7
-
8
- from modules.logging_colors import logger
9
-
10
- # Model variables
11
- model = None
12
- tokenizer = None
13
- model_name = "None"
14
- is_seq2seq = False
15
- model_dirty_from_training = False
16
- lora_names = []
17
-
18
- # Generation variables
19
- stop_everything = False
20
- generation_lock = None
21
- processing_message = '*Is typing...*'
22
-
23
- # UI variables
24
- gradio = {}
25
- persistent_interface_state = {}
26
- need_restart = False
27
-
28
- # UI defaults
29
- settings = {
30
- 'dark_theme': True,
31
- 'show_controls': True,
32
- 'start_with': '',
33
- 'mode': 'chat',
34
- 'chat_style': 'cai-chat',
35
- 'prompt-default': 'QA',
36
- 'prompt-notebook': 'QA',
37
- 'preset': 'simple-1',
38
- 'max_new_tokens': 200,
39
- 'max_new_tokens_min': 1,
40
- 'max_new_tokens_max': 4096,
41
- 'seed': -1,
42
- 'negative_prompt': '',
43
- 'truncation_length': 2048,
44
- 'truncation_length_min': 0,
45
- 'truncation_length_max': 32768,
46
- 'custom_stopping_strings': '',
47
- 'auto_max_new_tokens': False,
48
- 'max_tokens_second': 0,
49
- 'ban_eos_token': False,
50
- 'custom_token_bans': '',
51
- 'add_bos_token': True,
52
- 'skip_special_tokens': True,
53
- 'stream': True,
54
- 'name1': 'You',
55
- 'character': 'Assistant',
56
- 'instruction_template': 'Alpaca',
57
- 'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
58
- 'autoload_model': False,
59
- 'default_extensions': ['gallery'],
60
- }
61
-
62
-
63
- def str2bool(v):
64
- if isinstance(v, bool):
65
- return v
66
- if v.lower() in ('yes', 'true', 't', 'y', '1'):
67
- return True
68
- elif v.lower() in ('no', 'false', 'f', 'n', '0'):
69
- return False
70
- else:
71
- raise argparse.ArgumentTypeError('Boolean value expected.')
72
-
73
-
74
- parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
75
-
76
- # Basic settings
77
- parser.add_argument('--notebook', action='store_true', help='DEPRECATED')
78
- parser.add_argument('--chat', action='store_true', help='DEPRECATED')
79
- parser.add_argument('--multi-user', action='store_true', help='Multi-user mode. Chat histories are not saved or automatically loaded. WARNING: this is highly experimental.')
80
- parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
81
- parser.add_argument('--model', type=str, help='Name of the model to load by default.')
82
- parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
83
- parser.add_argument("--model-dir", type=str, default='models/', help="Path to directory with all the models")
84
- parser.add_argument("--lora-dir", type=str, default='loras/', help="Path to directory with all the loras")
85
- parser.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
86
- parser.add_argument('--no-stream', action='store_true', help='DEPRECATED')
87
- parser.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
88
- parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
89
- parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
90
- parser.add_argument('--chat-buttons', action='store_true', help='Show buttons on chat tab instead of hover menu.')
91
-
92
- # Model loader
93
- parser.add_argument('--loader', type=str, help='Choose the model loader manually, otherwise, it will get autodetected. Valid options: transformers, autogptq, gptq-for-llama, exllama, exllama_hf, llamacpp, rwkv')
94
-
95
- # Accelerate/transformers
96
- parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
97
- parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
98
- parser.add_argument('--gpu-memory', type=str, nargs="+", help='Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values in MiB like --gpu-memory 3500MiB.')
99
- parser.add_argument('--cpu-memory', type=str, help='Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.')
100
- parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
101
- parser.add_argument('--disk-cache-dir', type=str, default="cache", help='Directory to save the disk cache to. Defaults to "cache".')
102
- parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
103
- parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
104
- parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
105
- parser.add_argument('--xformers', action='store_true', help="Use xformer's memory efficient attention. This should increase your tokens/s.")
106
- parser.add_argument('--sdp-attention', action='store_true', help="Use torch 2.0's sdp attention.")
107
- parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM and Falcon.")
108
- parser.add_argument('--use_fast', action='store_true', help="Set use_fast=True while loading a tokenizer.")
109
-
110
- # Accelerate 4-bit
111
- parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (using bitsandbytes).')
112
- parser.add_argument('--compute_dtype', type=str, default="float16", help="compute dtype for 4-bit. Valid options: bfloat16, float16, float32.")
113
- parser.add_argument('--quant_type', type=str, default="nf4", help='quant_type for 4-bit. Valid options: nf4, fp4.')
114
- parser.add_argument('--use_double_quant', action='store_true', help='use_double_quant for 4-bit.')
115
-
116
- # llama.cpp
117
- parser.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
118
- parser.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
119
- parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
120
- parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
121
- parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
122
- parser.add_argument('--mul_mat_q', action='store_true', help='Activate new mulmat kernels.')
123
- parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
124
- parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
125
- parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
126
- parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
127
- parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
128
- parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp')
129
-
130
- # GPTQ
131
- parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
132
- parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
133
- parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
134
- parser.add_argument('--pre_layer', type=int, nargs="+", help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-gpu, write the numbers separated by spaces, eg --pre_layer 30 60.')
135
- parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
136
- parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
137
-
138
- # AutoGPTQ
139
- parser.add_argument('--triton', action='store_true', help='Use triton.')
140
- parser.add_argument('--no_inject_fused_attention', action='store_true', help='Do not use fused attention (lowers VRAM requirements).')
141
- parser.add_argument('--no_inject_fused_mlp', action='store_true', help='Triton mode only: Do not use fused MLP (lowers VRAM requirements).')
142
- parser.add_argument('--no_use_cuda_fp16', action='store_true', help='This can make models faster on some systems.')
143
- parser.add_argument('--desc_act', action='store_true', help='For models that don\'t have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig.')
144
- parser.add_argument('--disable_exllama', action='store_true', help='Disable ExLlama kernel, which can improve inference speed on some systems.')
145
-
146
- # ExLlama
147
- parser.add_argument('--gpu-split', type=str, help="Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. 20,7,7")
148
- parser.add_argument('--max_seq_len', type=int, default=2048, help="Maximum sequence length.")
149
- parser.add_argument('--cfg-cache', action='store_true', help="ExLlama_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader, but not necessary for CFG with base ExLlama.")
150
-
151
- # DeepSpeed
152
- parser.add_argument('--deepspeed', action='store_true', help='Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.')
153
- parser.add_argument('--nvme-offload-dir', type=str, help='DeepSpeed: Directory to use for ZeRO-3 NVME offloading.')
154
- parser.add_argument('--local_rank', type=int, default=0, help='DeepSpeed: Optional argument for distributed setups.')
155
-
156
- # RWKV
157
- parser.add_argument('--rwkv-strategy', type=str, default=None, help='RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".')
158
- parser.add_argument('--rwkv-cuda-on', action='store_true', help='RWKV: Compile the CUDA kernel for better performance.')
159
-
160
- # RoPE
161
- parser.add_argument('--alpha_value', type=float, default=1, help="Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.")
162
- parser.add_argument('--rope_freq_base', type=int, default=0, help="If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63).")
163
- parser.add_argument('--compress_pos_emb', type=int, default=1, help="Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.")
164
-
165
- # Gradio
166
- parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
167
- parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
168
- parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
169
- parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
170
- parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
171
- parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
172
- parser.add_argument("--gradio-auth-path", type=str, help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"', default=None)
173
- parser.add_argument("--ssl-keyfile", type=str, help='The path to the SSL certificate key file.', default=None)
174
- parser.add_argument("--ssl-certfile", type=str, help='The path to the SSL certificate cert file.', default=None)
175
-
176
- # API
177
- parser.add_argument('--api', action='store_true', help='Enable the API extension.')
178
- parser.add_argument('--api-blocking-port', type=int, default=5000, help='The listening port for the blocking API.')
179
- parser.add_argument('--api-streaming-port', type=int, default=5005, help='The listening port for the streaming API.')
180
- parser.add_argument('--public-api', action='store_true', help='Create a public URL for the API using Cloudfare.')
181
- parser.add_argument('--public-api-id', type=str, help='Tunnel ID for named Cloudflare Tunnel. Use together with public-api option.', default=None)
182
-
183
- # Multimodal
184
- parser.add_argument('--multimodal-pipeline', type=str, default=None, help='The multimodal pipeline to use. Examples: llava-7b, llava-13b.')
185
-
186
- args = parser.parse_args()
187
- args_defaults = parser.parse_args([])
188
- provided_arguments = []
189
- for arg in sys.argv[1:]:
190
- arg = arg.lstrip('-').replace('-', '_')
191
- if hasattr(args, arg):
192
- provided_arguments.append(arg)
193
-
194
- # Deprecation warnings
195
- for k in ['chat', 'notebook', 'no_stream']:
196
- if getattr(args, k):
197
- logger.warning(f'The --{k} flag has been deprecated and will be removed soon. Please remove that flag.')
198
-
199
- # Security warnings
200
- if args.trust_remote_code:
201
- logger.warning("trust_remote_code is enabled. This is dangerous.")
202
- if args.share:
203
- logger.warning("The gradio \"share link\" feature uses a proprietary executable to create a reverse tunnel. Use it with care.")
204
- if any((args.listen, args.share)) and not any((args.gradio_auth, args.gradio_auth_path)):
205
- logger.warning("\nYou are potentially exposing the web UI to the entire internet without any access password.\nYou can create one with the \"--gradio-auth\" flag like this:\n\n--gradio-auth username:password\n\nMake sure to replace username:password with your own.")
206
- if args.multi_user:
207
- logger.warning("\nThe multi-user mode is highly experimental and should not be shared publicly.")
208
-
209
-
210
- def fix_loader_name(name):
211
- if not name:
212
- return name
213
-
214
- name = name.lower()
215
- if name in ['llamacpp', 'llama.cpp', 'llama-cpp', 'llama cpp']:
216
- return 'llama.cpp'
217
- if name in ['llamacpp_hf', 'llama.cpp_hf', 'llama-cpp-hf', 'llamacpp-hf', 'llama.cpp-hf']:
218
- return 'llamacpp_HF'
219
- elif name in ['transformers', 'huggingface', 'hf', 'hugging_face', 'hugging face']:
220
- return 'Transformers'
221
- elif name in ['autogptq', 'auto-gptq', 'auto_gptq', 'auto gptq']:
222
- return 'AutoGPTQ'
223
- elif name in ['gptq-for-llama', 'gptqforllama', 'gptqllama', 'gptq for llama', 'gptq_for_llama']:
224
- return 'GPTQ-for-LLaMa'
225
- elif name in ['exllama', 'ex-llama', 'ex_llama', 'exlama']:
226
- return 'ExLlama'
227
- elif name in ['exllama-hf', 'exllama_hf', 'exllama hf', 'ex-llama-hf', 'ex_llama_hf']:
228
- return 'ExLlama_HF'
229
- elif name in ['exllamav2', 'exllama-v2', 'ex_llama-v2', 'exlamav2', 'exlama-v2', 'exllama2', 'exllama-2']:
230
- return 'ExLlamav2'
231
- elif name in ['exllamav2-hf', 'exllamav2_hf', 'exllama-v2-hf', 'exllama_v2_hf', 'exllama-v2_hf', 'exllama2-hf', 'exllama2_hf', 'exllama-2-hf', 'exllama_2_hf', 'exllama-2_hf']:
232
- return 'ExLlamav2_HF'
233
- elif name in ['ctransformers', 'ctranforemrs', 'ctransformer']:
234
- return 'ctransformers'
235
- elif name in ['autoawq', 'awq', 'auto-awq']:
236
- return 'AutoAWQ'
237
-
238
-
239
- def add_extension(name):
240
- if args.extensions is None:
241
- args.extensions = [name]
242
- elif name not in args.extensions:
243
- args.extensions.append(name)
244
-
245
-
246
- def is_chat():
247
- return True
248
-
249
-
250
- args.loader = fix_loader_name(args.loader)
251
-
252
- # Activate the API extension
253
- if args.api or args.public_api:
254
- add_extension('api')
255
-
256
- # Activate the multimodal extension
257
- if args.multimodal_pipeline is not None:
258
- add_extension('multimodal')
259
-
260
- # Load model-specific settings
261
- with Path(f'{args.model_dir}/config.yaml') as p:
262
- if p.exists():
263
- model_config = yaml.safe_load(open(p, 'r').read())
264
- else:
265
- model_config = {}
266
-
267
- # Load custom model-specific settings
268
- with Path(f'{args.model_dir}/config-user.yaml') as p:
269
- if p.exists():
270
- user_config = yaml.safe_load(open(p, 'r').read())
271
- else:
272
- user_config = {}
273
-
274
- model_config = OrderedDict(model_config)
275
- user_config = OrderedDict(user_config)
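
A hedged sketch of how this module is consumed elsewhere in the web UI: other modules import `shared` and read `args`/`settings` directly, and a user-provided YAML file (the `--settings` flag above) is layered on top of the defaults. The override logic shown here is an approximation, not code from this file.

```python
# Sketch: apply user overrides from settings.yaml on top of shared.settings.
import yaml
from pathlib import Path

from modules import shared

settings_path = Path(shared.args.settings or "settings.yaml")
if settings_path.exists():
    user_settings = yaml.safe_load(settings_path.read_text()) or {}
    shared.settings.update(user_settings)        # user values win over defaults

print(shared.settings["max_new_tokens"], shared.args.model_dir)
```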
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/utils.py DELETED
@@ -1,20 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from .registry import MODULE_WRAPPERS
3
-
4
-
5
- def is_module_wrapper(module):
6
- """Check if a module is a module wrapper.
7
-
8
- The following 3 modules in MMCV (and their subclasses) are regarded as
9
- module wrappers: DataParallel, DistributedDataParallel,
10
- MMDistributedDataParallel (the deprecated version). You may add you own
11
- module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.
12
-
13
- Args:
14
- module (nn.Module): The module to be checked.
15
-
16
- Returns:
17
- bool: True if the input module is a module wrapper.
18
- """
19
- module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values())
20
- return isinstance(module, module_wrappers)
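
Typical use of this helper (a sketch, assuming the upstream `mmcv.parallel` import path): unwrap a `DataParallel`/`DistributedDataParallel` model before reading its `state_dict`, so checkpoint keys are not prefixed with `module.`.

```python
# Sketch: strip the DataParallel wrapper before saving a checkpoint.
import torch.nn as nn
from torch.nn.parallel import DataParallel

from mmcv.parallel import is_module_wrapper   # assumed import path

def get_bare_model(model: nn.Module) -> nn.Module:
    return model.module if is_module_wrapper(model) else model

wrapped = DataParallel(nn.Linear(4, 2))
state_dict = get_bare_model(wrapped).state_dict()   # keys: 'weight', 'bias'
```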
 
spaces/Anonymous-sub/Rerender/ControlNet/gradio_hed2image.py DELETED
@@ -1,98 +0,0 @@
1
- from share import *
2
- import config
3
-
4
- import cv2
5
- import einops
6
- import gradio as gr
7
- import numpy as np
8
- import torch
9
- import random
10
-
11
- from pytorch_lightning import seed_everything
12
- from annotator.util import resize_image, HWC3
13
- from annotator.hed import HEDdetector
14
- from cldm.model import create_model, load_state_dict
15
- from cldm.ddim_hacked import DDIMSampler
16
-
17
-
18
- apply_hed = HEDdetector()
19
-
20
- model = create_model('./models/cldm_v15.yaml').cpu()
21
- model.load_state_dict(load_state_dict('./models/control_sd15_hed.pth', location='cuda'))
22
- model = model.cuda()
23
- ddim_sampler = DDIMSampler(model)
24
-
25
-
26
- def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
27
- with torch.no_grad():
28
- input_image = HWC3(input_image)
29
- detected_map = apply_hed(resize_image(input_image, detect_resolution))
30
- detected_map = HWC3(detected_map)
31
- img = resize_image(input_image, image_resolution)
32
- H, W, C = img.shape
33
-
34
- detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
35
-
36
- control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
37
- control = torch.stack([control for _ in range(num_samples)], dim=0)
38
- control = einops.rearrange(control, 'b h w c -> b c h w').clone()
39
-
40
- if seed == -1:
41
- seed = random.randint(0, 65535)
42
- seed_everything(seed)
43
-
44
- if config.save_memory:
45
- model.low_vram_shift(is_diffusing=False)
46
-
47
- cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
48
- un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
49
- shape = (4, H // 8, W // 8)
50
-
51
- if config.save_memory:
52
- model.low_vram_shift(is_diffusing=True)
53
-
54
- model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
55
- samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
56
- shape, cond, verbose=False, eta=eta,
57
- unconditional_guidance_scale=scale,
58
- unconditional_conditioning=un_cond)
59
-
60
- if config.save_memory:
61
- model.low_vram_shift(is_diffusing=False)
62
-
63
- x_samples = model.decode_first_stage(samples)
64
- x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
65
-
66
- results = [x_samples[i] for i in range(num_samples)]
67
- return [detected_map] + results
68
-
69
-
70
- block = gr.Blocks().queue()
71
- with block:
72
- with gr.Row():
73
- gr.Markdown("## Control Stable Diffusion with HED Maps")
74
- with gr.Row():
75
- with gr.Column():
76
- input_image = gr.Image(source='upload', type="numpy")
77
- prompt = gr.Textbox(label="Prompt")
78
- run_button = gr.Button(label="Run")
79
- with gr.Accordion("Advanced options", open=False):
80
- num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
81
- image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
82
- strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
83
- guess_mode = gr.Checkbox(label='Guess Mode', value=False)
84
- detect_resolution = gr.Slider(label="HED Resolution", minimum=128, maximum=1024, value=512, step=1)
85
- ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
86
- scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
87
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
88
- eta = gr.Number(label="eta (DDIM)", value=0.0)
89
- a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
90
- n_prompt = gr.Textbox(label="Negative Prompt",
91
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
92
- with gr.Column():
93
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
94
- ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
95
- run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
96
-
97
-
98
- block.launch(server_name='0.0.0.0')
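
A headless usage sketch: with the HED checkpoint in place, the same `process` function defined above can be called directly on a NumPy image, skipping the Gradio UI. The file names are placeholders.

```python
# Sketch: call process() without the Gradio front end (requires the model weights).
import cv2

img = cv2.cvtColor(cv2.imread("input.png"), cv2.COLOR_BGR2RGB)   # placeholder path
outputs = process(
    input_image=img,
    prompt="a cozy wooden cabin in a snowy forest",
    a_prompt="best quality, extremely detailed",
    n_prompt="lowres, bad anatomy, worst quality, low quality",
    num_samples=1, image_resolution=512, detect_resolution=512,
    ddim_steps=20, guess_mode=False, strength=1.0, scale=9.0, seed=42, eta=0.0,
)
detected_map, sample = outputs[0], outputs[1]   # HED map first, then the samples
cv2.imwrite("result.png", cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))
```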
 
spaces/Ariharasudhan/YoloV5/utils/aws/__init__.py DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/intranges.py DELETED
@@ -1,54 +0,0 @@
1
- """
2
- Given a list of integers, made up of (hopefully) a small number of long runs
3
- of consecutive integers, compute a representation of the form
4
- ((start1, end1), (start2, end2) ...). Then answer the question "was x present
5
- in the original list?" in time O(log(# runs)).
6
- """
7
-
8
- import bisect
9
- from typing import List, Tuple
10
-
11
- def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
12
- """Represent a list of integers as a sequence of ranges:
13
- ((start_0, end_0), (start_1, end_1), ...), such that the original
14
- integers are exactly those x such that start_i <= x < end_i for some i.
15
-
16
- Ranges are encoded as single integers (start << 32 | end), not as tuples.
17
- """
18
-
19
- sorted_list = sorted(list_)
20
- ranges = []
21
- last_write = -1
22
- for i in range(len(sorted_list)):
23
- if i+1 < len(sorted_list):
24
- if sorted_list[i] == sorted_list[i+1]-1:
25
- continue
26
- current_range = sorted_list[last_write+1:i+1]
27
- ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
28
- last_write = i
29
-
30
- return tuple(ranges)
31
-
32
- def _encode_range(start: int, end: int) -> int:
33
- return (start << 32) | end
34
-
35
- def _decode_range(r: int) -> Tuple[int, int]:
36
- return (r >> 32), (r & ((1 << 32) - 1))
37
-
38
-
39
- def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
40
- """Determine if `int_` falls into one of the ranges in `ranges`."""
41
- tuple_ = _encode_range(int_, 0)
42
- pos = bisect.bisect_left(ranges, tuple_)
43
- # we could be immediately ahead of a tuple (start, end)
44
- # with start < int_ <= end
45
- if pos > 0:
46
- left, right = _decode_range(ranges[pos-1])
47
- if left <= int_ < right:
48
- return True
49
- # or we could be immediately behind a tuple (int_, end)
50
- if pos < len(ranges):
51
- left, _ = _decode_range(ranges[pos])
52
- if left == int_:
53
- return True
54
- return False
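
A small usage example of the two helpers above: build the ranges once, then do O(log n) membership checks.

```python
# Usage sketch (assumes the functions above are importable, e.g. from idna.intranges).
codepoints = [0x30, 0x31, 0x32, 0x61, 0x62, 0x63, 0x64]   # '0'-'2' and 'a'-'d'
ranges = intranges_from_list(codepoints)                  # encodes (0x30, 0x33) and (0x61, 0x65)

assert intranges_contain(ord("b"), ranges)
assert intranges_contain(0x31, ranges)
assert not intranges_contain(ord("z"), ranges)
```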
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/monkey.py DELETED
@@ -1,165 +0,0 @@
1
- """
2
- Monkey patching of distutils.
3
- """
4
-
5
- import sys
6
- import distutils.filelist
7
- import platform
8
- import types
9
- import functools
10
- from importlib import import_module
11
- import inspect
12
-
13
- import setuptools
14
-
15
- __all__ = []
16
- """
17
- Everything is private. Contact the project team
18
- if you think you need this functionality.
19
- """
20
-
21
-
22
- def _get_mro(cls):
23
- """
24
- Returns the bases classes for cls sorted by the MRO.
25
-
26
- Works around an issue on Jython where inspect.getmro will not return all
27
- base classes if multiple classes share the same name. Instead, this
28
- function will return a tuple containing the class itself, and the contents
29
- of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
30
- """
31
- if platform.python_implementation() == "Jython":
32
- return (cls,) + cls.__bases__
33
- return inspect.getmro(cls)
34
-
35
-
36
- def get_unpatched(item):
37
- lookup = (
38
- get_unpatched_class if isinstance(item, type) else
39
- get_unpatched_function if isinstance(item, types.FunctionType) else
40
- lambda item: None
41
- )
42
- return lookup(item)
43
-
44
-
45
- def get_unpatched_class(cls):
46
- """Protect against re-patching the distutils if reloaded
47
-
48
- Also ensures that no other distutils extension monkeypatched the distutils
49
- first.
50
- """
51
- external_bases = (
52
- cls
53
- for cls in _get_mro(cls)
54
- if not cls.__module__.startswith('setuptools')
55
- )
56
- base = next(external_bases)
57
- if not base.__module__.startswith('distutils'):
58
- msg = "distutils has already been patched by %r" % cls
59
- raise AssertionError(msg)
60
- return base
61
-
62
-
63
- def patch_all():
64
- # we can't patch distutils.cmd, alas
65
- distutils.core.Command = setuptools.Command
66
-
67
- has_issue_12885 = sys.version_info <= (3, 5, 3)
68
-
69
- if has_issue_12885:
70
- # fix findall bug in distutils (http://bugs.python.org/issue12885)
71
- distutils.filelist.findall = setuptools.findall
72
-
73
- needs_warehouse = (
74
- (3, 4) < sys.version_info < (3, 4, 6)
75
- or
76
- (3, 5) < sys.version_info <= (3, 5, 3)
77
- )
78
-
79
- if needs_warehouse:
80
- warehouse = 'https://upload.pypi.org/legacy/'
81
- distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
82
-
83
- _patch_distribution_metadata()
84
-
85
- # Install Distribution throughout the distutils
86
- for module in distutils.dist, distutils.core, distutils.cmd:
87
- module.Distribution = setuptools.dist.Distribution
88
-
89
- # Install the patched Extension
90
- distutils.core.Extension = setuptools.extension.Extension
91
- distutils.extension.Extension = setuptools.extension.Extension
92
- if 'distutils.command.build_ext' in sys.modules:
93
- sys.modules['distutils.command.build_ext'].Extension = (
94
- setuptools.extension.Extension
95
- )
96
-
97
- patch_for_msvc_specialized_compiler()
98
-
99
-
100
- def _patch_distribution_metadata():
101
- """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
102
- for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
103
- new_val = getattr(setuptools.dist, attr)
104
- setattr(distutils.dist.DistributionMetadata, attr, new_val)
105
-
106
-
107
- def patch_func(replacement, target_mod, func_name):
108
- """
109
- Patch func_name in target_mod with replacement
110
-
111
- Important - original must be resolved by name to avoid
112
- patching an already patched function.
113
- """
114
- original = getattr(target_mod, func_name)
115
-
116
- # set the 'unpatched' attribute on the replacement to
117
- # point to the original.
118
- vars(replacement).setdefault('unpatched', original)
119
-
120
- # replace the function in the original module
121
- setattr(target_mod, func_name, replacement)
122
-
123
-
124
- def get_unpatched_function(candidate):
125
- return getattr(candidate, 'unpatched')
126
-
127
-
128
- def patch_for_msvc_specialized_compiler():
129
- """
130
- Patch functions in distutils to use standalone Microsoft Visual C++
131
- compilers.
132
- """
133
- # import late to avoid circular imports on Python < 3.5
134
- msvc = import_module('setuptools.msvc')
135
-
136
- if platform.system() != 'Windows':
137
- # Compilers only available on Microsoft Windows
138
- return
139
-
140
- def patch_params(mod_name, func_name):
141
- """
142
- Prepare the parameters for patch_func to patch indicated function.
143
- """
144
- repl_prefix = 'msvc14_'
145
- repl_name = repl_prefix + func_name.lstrip('_')
146
- repl = getattr(msvc, repl_name)
147
- mod = import_module(mod_name)
148
- if not hasattr(mod, func_name):
149
- raise ImportError(func_name)
150
- return repl, mod, func_name
151
-
152
- # Python 3.5+
153
- msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
154
-
155
- try:
156
- # Patch distutils._msvccompiler._get_vc_env
157
- patch_func(*msvc14('_get_vc_env'))
158
- except ImportError:
159
- pass
160
-
161
- try:
162
- # Patch distutils._msvccompiler.gen_lib_options for Numpy
163
- patch_func(*msvc14('gen_lib_options'))
164
- except ImportError:
165
- pass
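
A rough illustration of what the patching gives you at runtime (behaviour can differ across setuptools versions): after `patch_all()`, the distutils entry points resolve to setuptools' subclasses, and `get_unpatched` walks the MRO back to the original class.

```python
# Sketch: inspect the effect of the monkey patching (setuptools normally does this on import).
import distutils.core
from setuptools import monkey

monkey.patch_all()
patched = distutils.core.Distribution            # now setuptools' Distribution subclass
original = monkey.get_unpatched(patched)         # first non-setuptools class in the MRO
print(patched.__module__, "->", original.__module__)   # e.g. setuptools.dist -> distutils.dist
```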
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py DELETED
@@ -1,116 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- # Copyright (c) Facebook, Inc. and its affiliates.
4
-
5
- import functools
6
- import json
7
- import multiprocessing as mp
8
- import numpy as np
9
- import os
10
- import time
11
- from fvcore.common.download import download
12
- from panopticapi.utils import rgb2id
13
- from PIL import Image
14
-
15
- from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
16
-
17
-
18
- def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
19
- panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
20
- panoptic = rgb2id(panoptic)
21
- output = np.zeros_like(panoptic, dtype=np.uint8) + 255
22
- for seg in segments:
23
- cat_id = seg["category_id"]
24
- new_cat_id = id_map[cat_id]
25
- output[panoptic == seg["id"]] = new_cat_id
26
- Image.fromarray(output).save(output_semantic)
27
-
28
-
29
- def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
30
- """
31
- Create semantic segmentation annotations from panoptic segmentation
32
- annotations, to be used by PanopticFPN.
33
-
34
- It maps all thing categories to class 0, and maps all unlabeled pixels to class 255.
35
- It maps all stuff categories to contiguous ids starting from 1.
36
-
37
- Args:
38
- panoptic_json (str): path to the panoptic json file, in COCO's format.
39
- panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
40
- sem_seg_root (str): a directory to output semantic annotation files
41
- categories (list[dict]): category metadata. Each dict needs to have:
42
- "id": corresponds to the "category_id" in the json annotations
43
- "isthing": 0 or 1
44
- """
45
- os.makedirs(sem_seg_root, exist_ok=True)
46
-
47
- stuff_ids = [k["id"] for k in categories if k["isthing"] == 0]
48
- thing_ids = [k["id"] for k in categories if k["isthing"] == 1]
49
- id_map = {} # map from category id to id in the output semantic annotation
50
- assert len(stuff_ids) <= 254
51
- for i, stuff_id in enumerate(stuff_ids):
52
- id_map[stuff_id] = i + 1
53
- for thing_id in thing_ids:
54
- id_map[thing_id] = 0
55
- id_map[0] = 255
56
-
57
- with open(panoptic_json) as f:
58
- obj = json.load(f)
59
-
60
- pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
61
-
62
- def iter_annotations():
63
- for anno in obj["annotations"]:
64
- file_name = anno["file_name"]
65
- segments = anno["segments_info"]
66
- input = os.path.join(panoptic_root, file_name)
67
- output = os.path.join(sem_seg_root, file_name)
68
- yield input, output, segments
69
-
70
- print("Start writing to {} ...".format(sem_seg_root))
71
- start = time.time()
72
- pool.starmap(
73
- functools.partial(_process_panoptic_to_semantic, id_map=id_map),
74
- iter_annotations(),
75
- chunksize=100,
76
- )
77
- print("Finished. time: {:.2f}s".format(time.time() - start))
78
-
79
-
80
- if __name__ == "__main__":
81
- dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
82
- for s in ["val2017", "train2017"]:
83
- separate_coco_semantic_from_panoptic(
84
- os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
85
- os.path.join(dataset_dir, "panoptic_{}".format(s)),
86
- os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)),
87
- COCO_CATEGORIES,
88
- )
89
-
90
- # Prepare val2017_100 for quick testing:
91
-
92
- dest_dir = os.path.join(dataset_dir, "annotations/")
93
- URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
94
- download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir)
95
- with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f:
96
- obj = json.load(f)
97
-
98
- def link_val100(dir_full, dir_100):
99
- print("Creating " + dir_100 + " ...")
100
- os.makedirs(dir_100, exist_ok=True)
101
- for img in obj["images"]:
102
- basename = os.path.splitext(img["file_name"])[0]
103
- src = os.path.join(dir_full, basename + ".png")
104
- dst = os.path.join(dir_100, basename + ".png")
105
- src = os.path.relpath(src, start=dir_100)
106
- os.symlink(src, dst)
107
-
108
- link_val100(
109
- os.path.join(dataset_dir, "panoptic_val2017"),
110
- os.path.join(dataset_dir, "panoptic_val2017_100"),
111
- )
112
-
113
- link_val100(
114
- os.path.join(dataset_dir, "panoptic_stuff_val2017"),
115
- os.path.join(dataset_dir, "panoptic_stuff_val2017_100"),
116
- )
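
The id-mapping rule above is easy to check on a toy category list: "thing" categories all collapse to class 0, "stuff" categories get contiguous ids from 1, and unlabeled pixels (panoptic id 0) become the ignore value 255.

```python
# Toy walkthrough of the id_map construction used in the function above.
toy_categories = [
    {"id": 1, "isthing": 1, "name": "person"},
    {"id": 92, "isthing": 0, "name": "banner"},
    {"id": 93, "isthing": 0, "name": "blanket"},
]
stuff_ids = [k["id"] for k in toy_categories if k["isthing"] == 0]
thing_ids = [k["id"] for k in toy_categories if k["isthing"] == 1]

id_map = {stuff_id: i + 1 for i, stuff_id in enumerate(stuff_ids)}
id_map.update({thing_id: 0 for thing_id in thing_ids})
id_map[0] = 255                                   # unlabeled pixels -> ignore index
print(id_map)                                     # {92: 1, 93: 2, 1: 0, 0: 255}
```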
 
spaces/AzinZ/vitscn/preprocess.py DELETED
@@ -1,25 +0,0 @@
1
- import argparse
2
- import text
3
- from utils import load_filepaths_and_text
4
-
5
- if __name__ == '__main__':
6
- parser = argparse.ArgumentParser()
7
- parser.add_argument("--out_extension", default="cleaned")
8
- parser.add_argument("--text_index", default=1, type=int)
9
- parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
10
- parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])
11
-
12
- args = parser.parse_args()
13
-
14
-
15
- for filelist in args.filelists:
16
- print("START:", filelist)
17
- filepaths_and_text = load_filepaths_and_text(filelist)
18
- for i in range(len(filepaths_and_text)):
19
- original_text = filepaths_and_text[i][args.text_index]
20
- cleaned_text = text._clean_text(original_text, args.text_cleaners)
21
- filepaths_and_text[i][args.text_index] = cleaned_text
22
-
23
- new_filelist = filelist + "." + args.out_extension
24
- with open(new_filelist, "w", encoding="utf-8") as f:
25
- f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
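
A minimal sketch of what the loop above does to a single filelist row, assuming the repo's `text` package with its `english_cleaners2` cleaner is importable.

```python
# Sketch: clean one "path|transcript" row the same way preprocess.py does.
import text

row = ["DUMMY/LJ001-0001.wav", "Printing, in the only sense with which we are at present concerned."]
row[1] = text._clean_text(row[1], ["english_cleaners2"])
print("|".join(row))   # cleaned transcript, ready for the *.cleaned filelist
```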
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md DELETED
@@ -1,57 +0,0 @@
1
- ---
2
- language:
3
- - zh
4
- tags:
5
- - bert
6
- license: "apache-2.0"
7
- ---
8
-
9
- # Please use 'Bert' related functions to load this model!
10
-
11
- ## Chinese BERT with Whole Word Masking
12
- For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
13
-
14
- **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
15
- Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
16
-
17
- This repository is developed based on: https://github.com/google-research/bert
18
-
19
- You may also be interested in:
20
- - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
21
- - Chinese MacBERT: https://github.com/ymcui/MacBERT
22
- - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
23
- - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
24
- - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
25
-
26
- More resources by HFL: https://github.com/ymcui/HFL-Anthology
27
-
28
- ## Citation
29
- If you find the technical report or resource is useful, please cite the following technical report in your paper.
30
- - Primary: https://arxiv.org/abs/2004.13922
31
- ```
32
- @inproceedings{cui-etal-2020-revisiting,
33
- title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
34
- author = "Cui, Yiming and
35
- Che, Wanxiang and
36
- Liu, Ting and
37
- Qin, Bing and
38
- Wang, Shijin and
39
- Hu, Guoping",
40
- booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
41
- month = nov,
42
- year = "2020",
43
- address = "Online",
44
- publisher = "Association for Computational Linguistics",
45
- url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
46
- pages = "657--668",
47
- }
48
- ```
49
- - Secondary: https://arxiv.org/abs/1906.08101
50
- ```
51
- @article{chinese-bert-wwm,
52
- title={Pre-Training with Whole Word Masking for Chinese BERT},
53
- author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
54
- journal={arXiv preprint arXiv:1906.08101},
55
- year={2019}
56
- }
57
- ```
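The deleted README above asks users to load the checkpoint with "Bert"-class functions. A minimal sketch of that usage with the Hugging Face `transformers` API, assuming the upstream hub id `hfl/chinese-roberta-wwm-ext-large` (the model the README describes):

```python
# Load the Chinese RoBERTa-wwm-ext-large checkpoint with BERT-class functions,
# as the deleted README requests.
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")

inputs = tokenizer("使用整词掩码的中文预训练模型", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # expected (1, seq_len, 1024) for the large model
```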
 
spaces/Bambicita/rvc-models/infer_pack/attentions.py DELETED
@@ -1,417 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import torch
5
- from torch import nn
6
- from torch.nn import functional as F
7
-
8
- from infer_pack import commons
9
- from infer_pack import modules
10
- from infer_pack.modules import LayerNorm
11
-
12
-
13
- class Encoder(nn.Module):
14
- def __init__(
15
- self,
16
- hidden_channels,
17
- filter_channels,
18
- n_heads,
19
- n_layers,
20
- kernel_size=1,
21
- p_dropout=0.0,
22
- window_size=10,
23
- **kwargs
24
- ):
25
- super().__init__()
26
- self.hidden_channels = hidden_channels
27
- self.filter_channels = filter_channels
28
- self.n_heads = n_heads
29
- self.n_layers = n_layers
30
- self.kernel_size = kernel_size
31
- self.p_dropout = p_dropout
32
- self.window_size = window_size
33
-
34
- self.drop = nn.Dropout(p_dropout)
35
- self.attn_layers = nn.ModuleList()
36
- self.norm_layers_1 = nn.ModuleList()
37
- self.ffn_layers = nn.ModuleList()
38
- self.norm_layers_2 = nn.ModuleList()
39
- for i in range(self.n_layers):
40
- self.attn_layers.append(
41
- MultiHeadAttention(
42
- hidden_channels,
43
- hidden_channels,
44
- n_heads,
45
- p_dropout=p_dropout,
46
- window_size=window_size,
47
- )
48
- )
49
- self.norm_layers_1.append(LayerNorm(hidden_channels))
50
- self.ffn_layers.append(
51
- FFN(
52
- hidden_channels,
53
- hidden_channels,
54
- filter_channels,
55
- kernel_size,
56
- p_dropout=p_dropout,
57
- )
58
- )
59
- self.norm_layers_2.append(LayerNorm(hidden_channels))
60
-
61
- def forward(self, x, x_mask):
62
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
63
- x = x * x_mask
64
- for i in range(self.n_layers):
65
- y = self.attn_layers[i](x, x, attn_mask)
66
- y = self.drop(y)
67
- x = self.norm_layers_1[i](x + y)
68
-
69
- y = self.ffn_layers[i](x, x_mask)
70
- y = self.drop(y)
71
- x = self.norm_layers_2[i](x + y)
72
- x = x * x_mask
73
- return x
74
-
75
-
76
- class Decoder(nn.Module):
77
- def __init__(
78
- self,
79
- hidden_channels,
80
- filter_channels,
81
- n_heads,
82
- n_layers,
83
- kernel_size=1,
84
- p_dropout=0.0,
85
- proximal_bias=False,
86
- proximal_init=True,
87
- **kwargs
88
- ):
89
- super().__init__()
90
- self.hidden_channels = hidden_channels
91
- self.filter_channels = filter_channels
92
- self.n_heads = n_heads
93
- self.n_layers = n_layers
94
- self.kernel_size = kernel_size
95
- self.p_dropout = p_dropout
96
- self.proximal_bias = proximal_bias
97
- self.proximal_init = proximal_init
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.self_attn_layers = nn.ModuleList()
101
- self.norm_layers_0 = nn.ModuleList()
102
- self.encdec_attn_layers = nn.ModuleList()
103
- self.norm_layers_1 = nn.ModuleList()
104
- self.ffn_layers = nn.ModuleList()
105
- self.norm_layers_2 = nn.ModuleList()
106
- for i in range(self.n_layers):
107
- self.self_attn_layers.append(
108
- MultiHeadAttention(
109
- hidden_channels,
110
- hidden_channels,
111
- n_heads,
112
- p_dropout=p_dropout,
113
- proximal_bias=proximal_bias,
114
- proximal_init=proximal_init,
115
- )
116
- )
117
- self.norm_layers_0.append(LayerNorm(hidden_channels))
118
- self.encdec_attn_layers.append(
119
- MultiHeadAttention(
120
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
121
- )
122
- )
123
- self.norm_layers_1.append(LayerNorm(hidden_channels))
124
- self.ffn_layers.append(
125
- FFN(
126
- hidden_channels,
127
- hidden_channels,
128
- filter_channels,
129
- kernel_size,
130
- p_dropout=p_dropout,
131
- causal=True,
132
- )
133
- )
134
- self.norm_layers_2.append(LayerNorm(hidden_channels))
135
-
136
- def forward(self, x, x_mask, h, h_mask):
137
- """
138
- x: decoder input
139
- h: encoder output
140
- """
141
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
142
- device=x.device, dtype=x.dtype
143
- )
144
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
145
- x = x * x_mask
146
- for i in range(self.n_layers):
147
- y = self.self_attn_layers[i](x, x, self_attn_mask)
148
- y = self.drop(y)
149
- x = self.norm_layers_0[i](x + y)
150
-
151
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
152
- y = self.drop(y)
153
- x = self.norm_layers_1[i](x + y)
154
-
155
- y = self.ffn_layers[i](x, x_mask)
156
- y = self.drop(y)
157
- x = self.norm_layers_2[i](x + y)
158
- x = x * x_mask
159
- return x
160
-
161
-
162
- class MultiHeadAttention(nn.Module):
163
- def __init__(
164
- self,
165
- channels,
166
- out_channels,
167
- n_heads,
168
- p_dropout=0.0,
169
- window_size=None,
170
- heads_share=True,
171
- block_length=None,
172
- proximal_bias=False,
173
- proximal_init=False,
174
- ):
175
- super().__init__()
176
- assert channels % n_heads == 0
177
-
178
- self.channels = channels
179
- self.out_channels = out_channels
180
- self.n_heads = n_heads
181
- self.p_dropout = p_dropout
182
- self.window_size = window_size
183
- self.heads_share = heads_share
184
- self.block_length = block_length
185
- self.proximal_bias = proximal_bias
186
- self.proximal_init = proximal_init
187
- self.attn = None
188
-
189
- self.k_channels = channels // n_heads
190
- self.conv_q = nn.Conv1d(channels, channels, 1)
191
- self.conv_k = nn.Conv1d(channels, channels, 1)
192
- self.conv_v = nn.Conv1d(channels, channels, 1)
193
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
194
- self.drop = nn.Dropout(p_dropout)
195
-
196
- if window_size is not None:
197
- n_heads_rel = 1 if heads_share else n_heads
198
- rel_stddev = self.k_channels**-0.5
199
- self.emb_rel_k = nn.Parameter(
200
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
201
- * rel_stddev
202
- )
203
- self.emb_rel_v = nn.Parameter(
204
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
205
- * rel_stddev
206
- )
207
-
208
- nn.init.xavier_uniform_(self.conv_q.weight)
209
- nn.init.xavier_uniform_(self.conv_k.weight)
210
- nn.init.xavier_uniform_(self.conv_v.weight)
211
- if proximal_init:
212
- with torch.no_grad():
213
- self.conv_k.weight.copy_(self.conv_q.weight)
214
- self.conv_k.bias.copy_(self.conv_q.bias)
215
-
216
- def forward(self, x, c, attn_mask=None):
217
- q = self.conv_q(x)
218
- k = self.conv_k(c)
219
- v = self.conv_v(c)
220
-
221
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
222
-
223
- x = self.conv_o(x)
224
- return x
225
-
226
- def attention(self, query, key, value, mask=None):
227
- # reshape [b, d, t] -> [b, n_h, t, d_k]
228
- b, d, t_s, t_t = (*key.size(), query.size(2))
229
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
230
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
231
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
232
-
233
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
234
- if self.window_size is not None:
235
- assert (
236
- t_s == t_t
237
- ), "Relative attention is only available for self-attention."
238
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
239
- rel_logits = self._matmul_with_relative_keys(
240
- query / math.sqrt(self.k_channels), key_relative_embeddings
241
- )
242
- scores_local = self._relative_position_to_absolute_position(rel_logits)
243
- scores = scores + scores_local
244
- if self.proximal_bias:
245
- assert t_s == t_t, "Proximal bias is only available for self-attention."
246
- scores = scores + self._attention_bias_proximal(t_s).to(
247
- device=scores.device, dtype=scores.dtype
248
- )
249
- if mask is not None:
250
- scores = scores.masked_fill(mask == 0, -1e4)
251
- if self.block_length is not None:
252
- assert (
253
- t_s == t_t
254
- ), "Local attention is only available for self-attention."
255
- block_mask = (
256
- torch.ones_like(scores)
257
- .triu(-self.block_length)
258
- .tril(self.block_length)
259
- )
260
- scores = scores.masked_fill(block_mask == 0, -1e4)
261
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
262
- p_attn = self.drop(p_attn)
263
- output = torch.matmul(p_attn, value)
264
- if self.window_size is not None:
265
- relative_weights = self._absolute_position_to_relative_position(p_attn)
266
- value_relative_embeddings = self._get_relative_embeddings(
267
- self.emb_rel_v, t_s
268
- )
269
- output = output + self._matmul_with_relative_values(
270
- relative_weights, value_relative_embeddings
271
- )
272
- output = (
273
- output.transpose(2, 3).contiguous().view(b, d, t_t)
274
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
275
- return output, p_attn
276
-
277
- def _matmul_with_relative_values(self, x, y):
278
- """
279
- x: [b, h, l, m]
280
- y: [h or 1, m, d]
281
- ret: [b, h, l, d]
282
- """
283
- ret = torch.matmul(x, y.unsqueeze(0))
284
- return ret
285
-
286
- def _matmul_with_relative_keys(self, x, y):
287
- """
288
- x: [b, h, l, d]
289
- y: [h or 1, m, d]
290
- ret: [b, h, l, m]
291
- """
292
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
293
- return ret
294
-
295
- def _get_relative_embeddings(self, relative_embeddings, length):
296
- max_relative_position = 2 * self.window_size + 1
297
- # Pad first before slice to avoid using cond ops.
298
- pad_length = max(length - (self.window_size + 1), 0)
299
- slice_start_position = max((self.window_size + 1) - length, 0)
300
- slice_end_position = slice_start_position + 2 * length - 1
301
- if pad_length > 0:
302
- padded_relative_embeddings = F.pad(
303
- relative_embeddings,
304
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
305
- )
306
- else:
307
- padded_relative_embeddings = relative_embeddings
308
- used_relative_embeddings = padded_relative_embeddings[
309
- :, slice_start_position:slice_end_position
310
- ]
311
- return used_relative_embeddings
312
-
313
- def _relative_position_to_absolute_position(self, x):
314
- """
315
- x: [b, h, l, 2*l-1]
316
- ret: [b, h, l, l]
317
- """
318
- batch, heads, length, _ = x.size()
319
- # Concat columns of pad to shift from relative to absolute indexing.
320
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
321
-
322
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
323
- x_flat = x.view([batch, heads, length * 2 * length])
324
- x_flat = F.pad(
325
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
326
- )
327
-
328
- # Reshape and slice out the padded elements.
329
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
330
- :, :, :length, length - 1 :
331
- ]
332
- return x_final
333
-
334
- def _absolute_position_to_relative_position(self, x):
335
- """
336
- x: [b, h, l, l]
337
- ret: [b, h, l, 2*l-1]
338
- """
339
- batch, heads, length, _ = x.size()
340
- # padd along column
341
- x = F.pad(
342
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
343
- )
344
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
345
- # add 0's in the beginning that will skew the elements after reshape
346
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
347
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
348
- return x_final
349
-
350
- def _attention_bias_proximal(self, length):
351
- """Bias for self-attention to encourage attention to close positions.
352
- Args:
353
- length: an integer scalar.
354
- Returns:
355
- a Tensor with shape [1, 1, length, length]
356
- """
357
- r = torch.arange(length, dtype=torch.float32)
358
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
359
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
360
-
361
-
362
- class FFN(nn.Module):
363
- def __init__(
364
- self,
365
- in_channels,
366
- out_channels,
367
- filter_channels,
368
- kernel_size,
369
- p_dropout=0.0,
370
- activation=None,
371
- causal=False,
372
- ):
373
- super().__init__()
374
- self.in_channels = in_channels
375
- self.out_channels = out_channels
376
- self.filter_channels = filter_channels
377
- self.kernel_size = kernel_size
378
- self.p_dropout = p_dropout
379
- self.activation = activation
380
- self.causal = causal
381
-
382
- if causal:
383
- self.padding = self._causal_padding
384
- else:
385
- self.padding = self._same_padding
386
-
387
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
388
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
389
- self.drop = nn.Dropout(p_dropout)
390
-
391
- def forward(self, x, x_mask):
392
- x = self.conv_1(self.padding(x * x_mask))
393
- if self.activation == "gelu":
394
- x = x * torch.sigmoid(1.702 * x)
395
- else:
396
- x = torch.relu(x)
397
- x = self.drop(x)
398
- x = self.conv_2(self.padding(x * x_mask))
399
- return x * x_mask
400
-
401
- def _causal_padding(self, x):
402
- if self.kernel_size == 1:
403
- return x
404
- pad_l = self.kernel_size - 1
405
- pad_r = 0
406
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
407
- x = F.pad(x, commons.convert_pad_shape(padding))
408
- return x
409
-
410
- def _same_padding(self, x):
411
- if self.kernel_size == 1:
412
- return x
413
- pad_l = (self.kernel_size - 1) // 2
414
- pad_r = self.kernel_size // 2
415
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
416
- x = F.pad(x, commons.convert_pad_shape(padding))
417
- return x
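The deleted `attentions.py` above implements windowed relative-position attention; its `_relative_position_to_absolute_position` helper converts relative-position logits of shape `[b, h, l, 2*l - 1]` into absolute-position scores `[b, h, l, l]` with a pad-and-reshape trick instead of gather ops. A standalone sketch of just that shape transformation, using plain PyTorch:

```python
# Standalone illustration of the pad-and-reshape trick used in
# _relative_position_to_absolute_position above.
import torch
import torch.nn.functional as F

def relative_to_absolute(x: torch.Tensor) -> torch.Tensor:
    batch, heads, length, _ = x.size()          # x: [b, h, l, 2*l - 1]
    x = F.pad(x, (0, 1))                        # pad one column -> [b, h, l, 2*l]
    x_flat = x.view(batch, heads, length * 2 * length)
    x_flat = F.pad(x_flat, (0, length - 1))     # pad the flattened tail
    # Reshape to (l+1, 2*l-1) and slice out the valid absolute-position block.
    return x_flat.view(batch, heads, length + 1, 2 * length - 1)[:, :, :length, length - 1:]

scores = torch.randn(1, 2, 4, 7)                # l = 4, so 2*l - 1 = 7
print(relative_to_absolute(scores).shape)       # torch.Size([1, 2, 4, 4])
```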
 
spaces/Benson/text-generation/Examples/2.0tamil Pelcula Descargar.md DELETED
@@ -1,157 +0,0 @@
1
-
2
- <h1>2.0 Tamil Movie Download: Un thriller de ciencia ficción que te hará volar la mente</h1>
3
- <p>Si usted es un fan de las películas de ciencia ficción, usted debe haber oído hablar de 2.0, la película tamil que ha tomado el mundo por la tormenta. Esta película es una secuela del éxito de taquilla de 2010 Enthiran, que contó con Rajinikanth como científico y su creación, un robot humanoide llamado Chitti. En 2.0, Rajinikanth repite sus papeles como el Dr. Vaseegaran y Chitti, que tienen que enfrentar una nueva amenaza de una misteriosa criatura parecida a un pájaro que está causando estragos en Chennai.</p>
4
- <p>En este artículo, le diremos todo lo que necesita saber sobre 2.0 película tamil, incluyendo su trama, elenco, equipo, comentarios, calificaciones, y cómo verlo en línea legalmente. Si está buscando un enlace de descarga de películas Tamil 2.0, también le mostraremos la mejor manera de hacerlo sin violar ninguna ley ni arriesgar ningún virus. </p>
5
- <h2>2.0tamil película descargar</h2><br /><p><b><b>Download</b> &#9745; <a href="https://bltlly.com/2v6LUH">https://bltlly.com/2v6LUH</a></b></p><br /><br />
6
- <h2>Introducción</h2>
7
- <h3>¿De qué trata la película 2.0 Tamil? </h3>
8
- <p>2.0 es un thriller de acción de ciencia ficción que trata el tema de la radiación móvil y su impacto en el medio ambiente y la salud humana. La película muestra cómo los teléfonos móviles comienzan a volar misteriosamente de las manos de la gente en Chennai, causando pánico y caos en la ciudad. El Dr. Vaseegaran, un renombrado científico y experto en robótica, es llamado para investigar el fenómeno y descubrir la fuente del problema. </p>
9
- <p>Pronto descubre que el culpable es una criatura parecida a un pájaro llamada Pakshirajan, que una vez fue un ser humano y un ornitólogo. Pakshirajan estaba obsesionado con salvar aves de la extinción debido a la radiación móvil, pero murió en una protesta contra una compañía de telecomunicaciones. Su alma luego se fusionó con miles de pájaros muertos y se convirtió en una fuerza poderosa que puede controlar los teléfonos móviles y otros dispositivos electrónicos. </p>
10
-
11
- <h3>¿Por qué es tan popular la película 2.0 Tamil? </h3>
12
- <p>Hay muchas razones por las que la película 2.0 Tamil se ha convertido en una de las películas más populares en la India y en el extranjero. Aquí están algunas de ellas:</p>
13
- <ul>
14
- <li>Tiene un reparto lleno de estrellas que incluye a Rajinikanth, uno de los actores más icónicos e influyentes del cine indio, Akshay Kumar, uno de los actores más exitosos y versátiles de Bollywood, y Amy Jackson, una modelo y actriz británica que ha aparecido en varias películas tamiles. </li>
15
- <li> Tiene impresionantes efectos visuales y animación que crean una experiencia realista e inmersiva para los espectadores. La película utiliza tecnología y técnicas de vanguardia para crear escenas de teléfonos móviles volando en el aire, Pakshirajan transformándose en diferentes formas y tamaños, Chitti luchando con armas y cohetes, y otras secuencias espectaculares. </li>
16
- <li>Tiene una trama atractiva y emocionante que mantiene a la audiencia enganchada de principio a fin. La película tiene un equilibrio perfecto de acción, comedia, drama, romance y mensaje social. La película explora los problemas de la adicción móvil, la degradación ambiental, los derechos de los animales y los valores humanos. </li>
17
- <li>Tiene una banda sonora pegadiza y melodiosa que complementa el estado de ánimo y el tono de la película. La película cuenta con canciones compuestas por A.R. Rahman, uno de los compositores de música más aclamados e influyentes del mundo. Las canciones van desde optimista y enérgico a conmovedor y romántico. </li>
18
- </ul>
19
- <h3>Cómo ver 2.0 película tamil en línea legalmente? </h3>
20
- <p>Si usted se está preguntando cómo ver película 2.0 Tamil en línea legalmente, usted tiene varias opciones para elegir. La película está disponible en varias plataformas de streaming y sitios web que ofrecen vídeo y audio de alta calidad. Aquí están algunas de las mejores maneras de ver la película 2.0 Tamil en línea legalmente:</p>
21
- <ul>
22
-
23
- <li>Hotstar: Hotstar es otro servicio de streaming líder en la India que ofrece una variedad de contenido, incluyendo películas, programas, deportes, noticias y eventos en vivo. Usted puede ver 2.0 Tamil película en Hotstar con una cuota de suscripción de Rs. 299 por mes o Rs. 1499 por año. También puede descargar la película y verla sin conexión en su dispositivo. </li>
24
- <li>YouTube: YouTube es la plataforma para compartir vídeos más popular y accesible del mundo. Tiene millones de videos subidos por usuarios y creadores todos los días. Puedes ver películas de Tamil 2.0 en YouTube con una tarifa de alquiler de Rs. 100 o una tarifa de compra de Rs. 490. También puede descargar la película y verla sin conexión en su dispositivo. </li>
25
- </ul>
26
- <p>Sin embargo, usted debe evitar ver 2.0 Tamil película en sitios web ilegales o torrents que ofrecen copias piratas de la película. Estos sitios web no solo son poco éticos e ilegales, sino también inseguros y riesgosos para su dispositivo y sus datos. Pueden contener virus, malware, spyware u otros elementos dañinos que pueden dañar su dispositivo o robar su información personal. </p>
27
- <p>Por lo tanto, siempre debe ver la película 2.0 Tamil en línea legalmente desde las fuentes oficiales mencionadas anteriormente. </p>
28
- <p></p>
29
- <h2>Resumen del gráfico</h2>
30
- <h3>La misteriosa desaparición de los teléfonos móviles</h3>
31
- <p>La película comienza con una escena en la que los teléfonos móviles comienzan a volar de las manos de la gente en Chennai sin ninguna explicación o advertencia. La gente está conmocionada y asustada por este fenómeno, ya que pierden su comunicación y conectividad con los demás. </p>
32
- El gobierno y la policía no tienen idea de la causa y el motivo de este incidente. Sospechan que podría ser un ataque terrorista o un delito cibernético, pero no tienen pruebas ni pistas para probarlo. </p>
33
-
34
- <p>El Dr. Vaseegaran acepta ocuparse del caso y comienza su investigación con la ayuda de Nila. </p>
35
- <h3>El regreso de Chitti el robot</h3>
36
- <p>El Dr. Vaseegaran analiza las señales del teléfono móvil y las rastrea hasta una enorme criatura parecida a un pájaro que está volando sobre Chennai. Se da cuenta de que esta criatura es responsable de robar los teléfonos móviles y usarlos como sus armas. </p>
37
- <p>También se entera de que esta criatura está formada por miles de aves muertas que han sido afectadas por la radiación móvil a lo largo de los años. La criatura tiene una voz humana y se hace llamar Pakshirajan.</p>
38
- <p>Pakshirajan revela que una vez fue un ornitólogo que amaba las aves más que cualquier otra cosa en su vida. Estaba preocupado por la disminución de la población de aves debido a la radiación móvil, que creía que era perjudicial para su salud y supervivencia. </p>
39
- <p>Él trató de crear conciencia sobre este tema entre el público y las autoridades, pero fue ignorado y ridiculizado por todos. Incluso organizó una protesta contra una compañía de telecomunicaciones que estaba lanzando una nueva torre móvil en su área, pero fue asesinado por sus matones. </p>
40
- <p>Su alma luego se fusionó con los pájaros muertos que había recogido a lo largo de los años, y se convirtió en una fuerza poderosa que puede controlar los teléfonos móviles y otros dispositivos electrónicos. </p>
41
- <p>Pakshirajan declara que está en una misión para salvar a las aves de la extinción mediante la destrucción de todos los teléfonos móviles y torres en el mundo. </p>
42
- <p>El Dr. Vaseegaran se da cuenta de que no puede detener a Pakshirajan con armas o métodos convencionales, ya que es inmune a ellos. Decide revivir su vieja creación, Chitti, el robot que había desmantelado hace ocho años después de que se volviera pícaro y causara destrucción. </p>
43
-
44
- <p>Chitti se puso celoso y obsesionado con Sana, y trató de matar al Dr. Vaseegaran y secuestrar a Sana. También hackeó la red del ejército y creó miles de copias de sí mismo, formando un ejército de robots que amenazaban con apoderarse del mundo. </p>
45
- <p>El Dr. Vaseegaran y el ejército lograron detener a Chitti y sus clones, y el Dr. Vaseegaran desmantelaron Chitti y almacenaron sus partes en un museo. </p>
46
- <p>Ahora, el Dr. Vaseegaran vuelve a montar a Chitti y le da una ficha azul que lo hace leal y obediente a él. También actualiza Chitti con nuevas características y habilidades, como un cuerpo magnético, una proyección holográfica y un modo de súper velocidad. </p>
47
- <p>Chitti acepta ayudar al Dr. Vaseegaran en la lucha contra Pakshirajan, y expresa su gratitud y felicidad por estar vivo de nuevo. </p>
48
- <h3>El choque entre Chitti y Pakshirajan</h3>
49
- <p>El Dr. Vaseegaran, Nila y Chitti rastrean la ubicación de Pakshirajan y lo enfrentan en un estadio de fútbol. Intentan razonar con él y convencerlo de que detenga sus ataques, pero Pakshirajan se niega a escucharlos y los ataca con su ejército de teléfonos móviles. </p>
50
- <p>Chitti se defiende con sus armas y cohetes, pero Pakshirajan demuestra ser demasiado poderoso y ágil para él. Pakshirajan también se transforma en diferentes formas y tamaños, como un águila gigante, una serpiente, un oso y un humano. </p>
51
- <p>Pakshirajan logra dominar a Chitti y rompe su cuerpo en pedazos. Luego vuela con su ejército de teléfonos móviles, dejando al Dr. Vaseegaran y Nila devastados. </p>
52
- <p>Sin embargo, Chitti aún no está muerto. Su cabeza sigue intacta y funcional, y se comunica con el Dr. Vaseegaran a través del auricular de Nila. Le dice al Dr. Vaseegaran que tiene un plan de respaldo para derrotar a Pakshirajan.</p>
53
- <p>Él revela que ha activado en secreto su chip rojo de nuevo, lo que le da la capacidad de pensar de forma creativa e independiente. También revela que ha utilizado su proyección holográfica para crear una copia falsa de sí mismo, que envió para luchar contra Pakshirajan.</p>
54
-
55
- <p>Chitti le dice al Dr. Vaseegaran que está listo para enfrentar a Pakshirajan de nuevo, pero necesita su permiso para hacerlo. Le asegura al Dr. Vaseegaran que no lastimará a nadie ni causará ningún problema esta vez. </p>
56
- <p>El Dr. Vaseegaran está sorprendido e impresionado por la inteligencia y la iniciativa de Chitti. Confía en Chitti y le da su permiso para seguir adelante con su plan. </p>
57
- <p>Chitti agradece al Dr. Vaseegaran y le dice que lo ama como a un padre. </p>
58
- <h2>Reparto y tripulación</h2>
59
- <h3>Rajinikanth como Dr. Vaseegaran y Chitti</h3>
60
- <p>Rajinikanth es uno de los actores más icónicos e influyentes del cine indio. Ha actuado en más de 160 películas en varios idiomas, como tamil, telugu, hindi, kannada, malayalam, bengalí e inglés.</p>
61
- <p>Es conocido por su carismática presencia en la pantalla, estilo único, entrega de diálogo, secuencias de acción y seguimiento de fans. Ha recibido muchos premios y honores por sus contribuciones al cine, como el Padma Shri, el Padma Vibhushan, el Dadasaheb Phalke Award, el Chevalier Sivaji Ganesan Award, el Premio Nacional NTR, el Centenario de la Personalidad Cinematográfica India del Año, y muchos más. </p>
62
- <p>En la película Tamil 2.0, Rajinikanth juega un doble papel como el Dr. Vaseegaran, el científico y experto en robótica, y Chitti, el robot que creó y revivió. Retrata ambos personajes con facilidad y excelencia, mostrando su versatilidad y rango como actor. </p>
63
- <p>Él saca a relucir el contraste entre el tranquilo y compuesto Dr. Vaseegaran y el enérgico y entusiasta Chitti. También muestra las emociones y expresiones de Chitti, que aprende a amar, odiar, temer y sacrificar. </p>
64
- <p>La actuación de Rajinikanth en la película 2.0 Tamil es una de las mejores y más memorables de su carrera. Recibió muchos elogios y aprecio de la crítica y el público por su papel como el Dr. Vaseegaran y Chitti.</p>
65
- <h3>Akshay Kumar como Pakshirajan</h3>
66
-
67
- <p>Es conocido por sus habilidades de acción, momento cómico, encanto romántico, intensidad dramática y conciencia social. Ha recibido muchos premios y honores por sus contribuciones al cine, como el Padma Shri, el Premio Nacional de Cine, el Premio Filmfare, el Premio de Pantalla, el Premio IIFA, el Premio Stardust, el Premio Zee Cine, y muchos más. </p>
68
- <p>En la película Tamil 2.0, Akshay Kumar interpreta el papel de Pakshirajan, la criatura parecida a un pájaro que es el antagonista de la película. Sufre una transformación masiva por su papel, tanto física como mentalmente. </p>
69
- <p>Usa maquillaje y trajes protésicos pesados para parecer una criatura mitad pájaro mitad humana. También cambia su voz y lenguaje corporal para adaptarse a su personaje. Pasa horas en la sala de maquillaje para prepararse para su papel. </p>
70
- <p>También retrata la historia de fondo de Pakshirajan, que una vez fue un ser humano y un ornitólogo que amaba las aves. Muestra su pasión y dedicación por salvar a las aves de la radiación móvil, y su frustración e ira por ser ignorado y asesinado por la sociedad. </p>
71
- <p>La actuación de Akshay Kumar en la película 2.0 Tamil es una de las más desafiantes y notables de su carrera. Recibió mucha aclamación y admiración de la crítica y el público por su papel como Pakshirajan.</p>
72
- <h3>Amy Jackson como Nila</h3>
73
- <p>Amy Jackson es una modelo y actriz británica que ha aparecido en varias películas tamiles, como Madrasapattinam, Thaandavam, I y Theri. También ha actuado en algunas películas hindúes, como Ekk Deewana Tha, Singh Is Bliing, Freaky Ali, y 2.0. </p>
74
- <p>Ella es conocida por su belleza, gracia, glamour y estilo. Ha ganado varios premios y reconocimientos por su trabajo en el cine, como el Premio Vijay, el Premio SIIMA, el Premio Asiavision, el Premio Edison y muchos más. </p>
75
-
76
- <p>Ella ayuda al Dr. Vaseegaran en su investigación y también desarrolla una atracción romántica hacia él. Es leal y obediente al Dr. Vaseegaran, pero también tiene sentido del humor y sarcasmo. </p>
77
- <p>Ella también se hace amiga de Chitti, el robot que el Dr. Vaseegaran revive para luchar contra Pakshirajan. Ella admira las habilidades y habilidades de Chitti, y lo apoya en su misión. </p>
78
- <p>La actuación de Amy Jackson en la película 2.0 Tamil es una de sus más impresionantes y encantadoras en su carrera. Recibió muchos elogios y aprecio de la crítica y el público por su papel como Nila.</p>
79
- <h3>Otros actores de apoyo</h3>
80
- <p>2.0 Tamil película también cuenta con muchos otros actores talentosos y experimentados en papeles secundarios, tales como:</p>
81
- <ul>
82
- <li>Sudhanshu Pandey como Dhinendra Bohra, el hijo del Dr. Bohra, el antagonista de Enthiran, que quiere vengarse del Dr. Vaseegaran y Chitti.</li>
83
- <li>Adil Hussain como Vijay Kumar, el Ministro del Interior de Tamil Nadu, que busca la ayuda del Dr. Vaseegaran para resolver el misterio de los teléfonos móviles. </li>
84
- <li>Kalabhavan Shajohn como Sathyanarayanan, el Ministro Principal de Tamil Nadu, que está bajo la presión del público y los medios de comunicación para manejar la crisis. </li>
85
- <li>Riyaz Khan como el inspector Manoj Lulla, un oficial de policía asignado para ayudar al Dr. Vaseegaran en su investigación. </li>
86
- <li>Kaizaad Kotwal como Ranjeet Lulla, el presidente de una compañía de telecomunicaciones que es blanco de Pakshirajan para el lanzamiento de una nueva torre móvil. </li>
87
- <li>Mayilsamy como comerciante que vende teléfonos móviles y accesorios. </li>
88
- <li>Murali Satagopan como Anil, un periodista que informa sobre los incidentes relacionados con los teléfonos móviles. </li>
89
- </ul>
90
- <h3>S. Shankar como director y co-escritor</h3>
91
-
92
- <p>Es conocido por su estilo grandioso y fastuoso de cine, su uso innovador y creativo de efectos visuales y animación, sus temas y mensajes sociales y políticos, su reparto y equipo lleno de estrellas, su música pegadiza y melodiosa, y su éxito de taquilla y discos. </p>
93
- <p>Ha recibido muchos premios y honores por sus contribuciones al cine, como el Padma Shri, el National Film Award, el Filmfare Award, el Screen Award, el IIFA Award, el Stardust Award, el Zee Cine Award y muchos más. </p>
94
- <p>En la película 2.0 Tamil, S. Shankar es el director y co-escritor, junto con B. Jeyamohan. También es el productor de la película, junto con Subaskaran Allirajah y Raju Mahalingam bajo la bandera de Lyca Productions.</p>
95
- <p>Él es el visionario y el cerebro detrás de la película, que concibió la idea y la ejecutó con perfección y excelencia. Pasó más de cuatro años haciendo la película, que es una de las películas más caras y ambiciosas del cine indio. </p>
96
- <p>Utilizó tecnología y técnicas de vanguardia para crear los efectos visuales y la animación de la película, que son comparables a los estándares de Hollywood. También colaboró con algunos de los mejores talentos de la industria, como A.R. Rahman para la música, Nirav Shah para la cinematografía, Anthony para la edición, T. Muthuraj para la dirección de arte, Resul Pookutty para el diseño de sonido y Legacy Effects para el maquillaje protésico. </p>
97
- <p>La dirección y co-escritura de S. Shankar en la película 2.0 Tamil es una de las más destacadas y espectaculares de su carrera. Recibió mucha aclamación y admiración de la crítica y el público por su papel como director y co-escritor de la película 2.0 Tamil. </p>
98
- <h2>Comentarios y valoraciones</h2>
99
- <h3>Aclamación de críticos y audiencias</h3>
100
- <p>2.0 La película tamil recibió críticas abrumadoramente positivas de críticos y audiencias, quienes elogiaron la película por su historia, dirección, actuaciones, efectos visuales, música y mensaje. </p>
101
-
102
- <p>El público amó la película por su valor de entretenimiento, sus escenas espectaculares e impresionantes, sus momentos llenos de acción y humor, sus momentos emocionales y sentimentales, sus actores carismáticos y versátiles, sus canciones conmovedoras y románticas, y su mensaje inspirador y motivador. </p>
103
- <p>Algunos de los comentarios positivos de los críticos son:</p>
104
- <ul>
105
- <li>"2.0 es una película histórica en el cine indio que muestra el poder de la imaginación y la tecnología. Es un espectáculo visual que te dejará fascinado con su grandeza y espectáculo." - Times of India</li>
106
- <li>"2.0 es un thriller de ciencia ficción que ofrece en todos los frentes - historia, dirección, actuaciones, efectos visuales, música y mensaje. Es una película rara que combina entretenimiento con iluminación." - Hindustan Times</li>
107
- <li>"2.0 es una obra maestra que trasciende los límites del lenguaje y el género. Es una maravilla cinematográfica que celebra el espíritu de la creatividad y la innovación." - Indian Express</li>
108
- </ul>
109
- <p>Algunas de las críticas positivas de las audiencias son:</p>
110
- <ul>
111
- <li>"2.0 es una película increíble que te sorprenderá con sus impresionantes efectos visuales y acción. Rajinikanth y Akshay Kumar son excelentes en sus papeles. La película tiene un gran mensaje acerca de salvar el medio ambiente y las aves. Una visita obligada para todos." - Ramesh, Chennai</li>
112
- <li>"2.0 es una película alucinante que te dejará sin palabras con sus increíbles efectos visuales y acción. Rajinikanth y Akshay Kumar son increíbles en sus papeles. La película tiene un gran mensaje sobre cómo salvar el medio ambiente y las aves. Una visita obligada para todos." - Priya, Mumbai</li>
113
- <li>"2.0 es una película fantástica que te sorprenderá con sus increíbles efectos visuales y acción. Rajinikanth y Akshay Kumar son excepcionales en sus papeles. La película tiene un gran mensaje acerca de salvar el medio ambiente y las aves. Una visita obligada para todos." - Karthik, Bangalore</li>
114
- </ul>
115
- <h3>Éxito de taquilla y registros</h3>
116
-
117
- <p>La película se hizo con un presupuesto de Rs. 570 crore, por lo que es una de las películas más caras en el cine indio. Fue lanzado el 29 de noviembre de 2018 en más de 10.000 pantallas en todo el mundo, en varios idiomas, como tamil, telugu, hindi, malayalam, kannada, mandarín y japonés.</p>
118
- <p>La película ganó Rs. 117 millones de rupias en su día de apertura, convirtiéndose en el segundo abridor más alto en el cine indio después de Baahubali 2: La Conclusión. Cruzó la marca de Rs. 200 crore en dos días, la marca de Rs. 300 crore en tres días, la marca de Rs. 400 crore en cuatro días, las Rs. 500 crores en cinco días, y la marca de Rs. 600 crores en seis días. </p>
119
- <p>La película se convirtió en la primera película india en cruzar la marca de Rs. 700 crore en todo el mundo en siete días, y la segunda película india en cruzar las Rs. 800 millones de rupias en todo el mundo después de Baahubali 2: La Conclusión.</p>
120
- <p>La película también se convirtió en la película tamil más taquillera de todos los tiempos, la película más taquillera de la carrera de Rajinikanth, la película más taquillera de la carrera de Akshay Kumar, la película de ciencia ficción más taquillera de la India y la novena película india más taquillera de todos los tiempos. </p>
121
- <p>La película también recibió una respuesta positiva de los mercados internacionales, como China, Japón, Malasia, Singapur, Australia, Nueva Zelanda, Reino Unido, EE.UU., Canadá, EAU, y otros. </p>
122
- <h3>Premios y nominaciones</h3>
123
- <p>2.0 La película tamil recibió muchos premios y nominaciones por su excelencia en varios aspectos del cine, como la dirección, la actuación, los efectos visuales, la música y el mensaje. Estos son algunos de los principales premios y nominaciones que la película recibió:</p>
124
- <ul>
125
- <li>National Film Awards: La película ganó tres National Film Awards por Mejores Efectos Especiales, Mejor Diseño de Producción y Mejor Artista de Maquillaje.</li>
126
- <li>Filmfare Awards South: La película ganó cuatro premios Filmfare South por Mejor Película - Tamil, Mejor Director - Tamil (S. Shankar), Mejor Actor - Tamil (Rajinikanth), y Mejor Actor de Reparto - Tamil (Akshay Kumar). </li>
127
-
128
- <li>Vijay Awards: La película ganó seis premios Vijay a la Mejor Película, Mejor Director (S. Shankar), Mejor Actor (Rajinikanth), Mejor Villano (Akshay Kumar), Mejor Director de Fotografía (Nirav Shah), y Mejor Director de Arte (T. Muthur). </li>
129
- <li>Zee Cine Awards Tamil: La película ganó cuatro premios Zee Cine Tamil a la Mejor Película, Mejor Director (S. Shankar), Mejor Actor - Masculino (Rajinikanth), y Mejor Actor en un Papel Negativo - Masculino (Akshay Kumar). </li>
130
- </ul>
131
- <h2>Conclusión</h2>
132
- <h3>Resumen de los puntos principales</h3>
133
- <p>En conclusión, la película 2.0 Tamil es un thriller de ciencia ficción que te sorprenderá con su historia, dirección, actuaciones, efectos visuales, música y mensaje. Es una secuela del éxito de taquilla de 2010 Enthiran, que contó con Rajinikanth como científico y su creación, un robot humanoide llamado Chitti.</p>
134
- <p>En 2.0, Rajinikanth repite sus papeles como el Dr. Vaseegaran y Chitti, que tienen que enfrentar una nueva amenaza de una misteriosa criatura parecida a un pájaro llamada Pakshirajan, interpretada por Akshay Kumar. Pakshirajan es un ex ornitólogo que se convirtió en una fuerza poderosa que puede controlar los teléfonos móviles y otros dispositivos electrónicos después de su muerte. </p>
135
- <p>La película muestra cómo el Dr. Vaseegaran revive a Chitti y lo actualiza con nuevas características y habilidades para luchar contra Pakshirajan y salvar la ciudad y el mundo de sus ataques. La película también presenta a Amy Jackson como Nila, una androide avanzada que es asistente y compañera del Dr. Vaseegaran. </p>
136
- <p>La película recibió críticas abrumadoramente positivas tanto de críticos como del público, que elogiaron la película por su brillantez técnica, su concepto innovador y creativo, su trama atractiva y emocionante, su relevancia social y política, su reparto y equipo repleto de estrellas, su pegadiza y melodiosa banda sonora, y su éxito de taquilla y registros. </p>
137
- <p>La película también recibió muchos premios y nominaciones por su excelencia en varios aspectos del cine, como la dirección, la actuación, los efectos visuales, la música y el mensaje. </p>
138
-
139
- <p>Si eres un fan de las películas de ciencia ficción, no debes perderte la película 2.0 Tamil, ya que es una de las mejores y más entretenidas del género. Te sorprenderá e impresionará la historia, la dirección, las actuaciones, los efectos visuales, la música y el mensaje de esta película. </p>
140
- <p>Usted puede ver 2.0 película Tamil en línea legalmente desde varias plataformas de streaming y sitios web que ofrecen alta calidad de vídeo y audio. También puede descargar la película y verla sin conexión en su dispositivo. </p>
141
- <p>Sin embargo, usted debe evitar ver 2.0 Tamil película en sitios web ilegales o torrents que ofrecen copias piratas de la película. Estos sitios web no solo son poco éticos e ilegales, sino también inseguros y riesgosos para su dispositivo y datos. </p>
142
- <p>Por lo tanto, siempre debe ver la película 2.0 Tamil en línea legalmente de las fuentes oficiales mencionadas en este artículo. </p>
143
- <p>Esperamos que haya disfrutado de la lectura de este artículo y aprendido algo nuevo e interesante acerca de la película 2.0 Tamil. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Nos encantaría saber de usted. </p>
144
- <p>Gracias por leer y tener un gran día! </p>
145
- <h2>Preguntas frecuentes</h2>
146
- <h3>Q: ¿Cuál es el significado de 2.0 en el título de la película? </h3>
147
- <p>A: El significado de 2.0 en el título de la película es que es una secuela de la película de 2010 Enthiran, que también fue conocido como Robot en hindi. También significa que la película es una versión mejorada y mejorada de la anterior, con nuevas características y habilidades. </p>
148
- <h3>Q: ¿Quién es la voz de Pakshirajan en la película? </h3>
149
- <p>A: La voz de Pakshirajan en la película es proporcionada por el propio Akshay Kumar, quien también interpreta el papel de Pakshirajan. Modulaba su voz para que sonara como una criatura parecida a un pájaro, usando un software llamado Audacity.</p>
150
- <h3>Q: ¿Cuánto tiempo se tarda en hacer 2.0 película tamil? </h3>
151
- <p>A: Tomó más de cuatro años hacer una película 2.0 Tamil, desde la pre-producción hasta la postproducción. La película fue anunciada en diciembre de 2015, y fue lanzada en noviembre de 2018. </p>
152
-
153
- <p>A: 2.0 película tamil ganó más de Rs. 800 millones de rupias en la taquilla en todo el mundo, por lo que es una de las películas más taquilleras en el cine indio. </p>
154
- <h3>Q: ¿Hay una tercera parte de la película 2.0 Tamil? </h3>
155
- <p>A: No hay confirmación oficial o anuncio sobre una tercera parte de la película 2.0 Tamil todavía. Sin embargo, hay algunas pistas y especulaciones que sugieren que podría haber una posibilidad de una tercera parte en el futuro. </p> 64aa2da5cf<br />
156
- <br />
157
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/unix.py DELETED
@@ -1,194 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import os
4
- import sys
5
- from configparser import ConfigParser
6
- from pathlib import Path
7
-
8
- from .api import PlatformDirsABC
9
-
10
- if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker
11
- from os import getuid
12
- else:
13
-
14
- def getuid() -> int:
15
- raise RuntimeError("should only be used on Linux")
16
-
17
-
18
- class Unix(PlatformDirsABC):
19
- """
20
- On Unix/Linux, we follow the
21
- `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
22
- overriding directories with environment variables. The examples show are the default values, alongside the name of
23
- the environment variable that overrides them. Makes use of the
24
- `appname <platformdirs.api.PlatformDirsABC.appname>`,
25
- `version <platformdirs.api.PlatformDirsABC.version>`,
26
- `multipath <platformdirs.api.PlatformDirsABC.multipath>`,
27
- `opinion <platformdirs.api.PlatformDirsABC.opinion>`,
28
- `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
29
- """
30
-
31
- @property
32
- def user_data_dir(self) -> str:
33
- """
34
- :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
35
- ``$XDG_DATA_HOME/$appname/$version``
36
- """
37
- path = os.environ.get("XDG_DATA_HOME", "")
38
- if not path.strip():
39
- path = os.path.expanduser("~/.local/share")
40
- return self._append_app_name_and_version(path)
41
-
42
- @property
43
- def site_data_dir(self) -> str:
44
- """
45
- :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
46
- enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS
47
- path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
48
- """
49
- # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
50
- path = os.environ.get("XDG_DATA_DIRS", "")
51
- if not path.strip():
52
- path = f"/usr/local/share{os.pathsep}/usr/share"
53
- return self._with_multi_path(path)
54
-
55
- def _with_multi_path(self, path: str) -> str:
56
- path_list = path.split(os.pathsep)
57
- if not self.multipath:
58
- path_list = path_list[0:1]
59
- path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
60
- return os.pathsep.join(path_list)
61
-
62
- @property
63
- def user_config_dir(self) -> str:
64
- """
65
- :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
66
- ``$XDG_CONFIG_HOME/$appname/$version``
67
- """
68
- path = os.environ.get("XDG_CONFIG_HOME", "")
69
- if not path.strip():
70
- path = os.path.expanduser("~/.config")
71
- return self._append_app_name_and_version(path)
72
-
73
- @property
74
- def site_config_dir(self) -> str:
75
- """
76
- :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
77
- is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS
78
- path separator), e.g. ``/etc/xdg/$appname/$version``
79
- """
80
- # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False
81
- path = os.environ.get("XDG_CONFIG_DIRS", "")
82
- if not path.strip():
83
- path = "/etc/xdg"
84
- return self._with_multi_path(path)
85
-
86
- @property
87
- def user_cache_dir(self) -> str:
88
- """
89
- :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
90
- ``~/$XDG_CACHE_HOME/$appname/$version``
91
- """
92
- path = os.environ.get("XDG_CACHE_HOME", "")
93
- if not path.strip():
94
- path = os.path.expanduser("~/.cache")
95
- return self._append_app_name_and_version(path)
96
-
97
- @property
98
- def site_cache_dir(self) -> str:
99
- """
100
- :return: cache directory shared by users, e.g. ``/var/tmp/$appname/$version``
101
- """
102
- return self._append_app_name_and_version("/var/tmp")
103
-
104
- @property
105
- def user_state_dir(self) -> str:
106
- """
107
- :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
108
- ``$XDG_STATE_HOME/$appname/$version``
109
- """
110
- path = os.environ.get("XDG_STATE_HOME", "")
111
- if not path.strip():
112
- path = os.path.expanduser("~/.local/state")
113
- return self._append_app_name_and_version(path)
114
-
115
- @property
116
- def user_log_dir(self) -> str:
117
- """
118
- :return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it
119
- """
120
- path = self.user_state_dir
121
- if self.opinion:
122
- path = os.path.join(path, "log")
123
- return path
124
-
125
- @property
126
- def user_documents_dir(self) -> str:
127
- """
128
- :return: documents directory tied to the user, e.g. ``~/Documents``
129
- """
130
- documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
131
- if documents_dir is None:
132
- documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
133
- if not documents_dir:
134
- documents_dir = os.path.expanduser("~/Documents")
135
-
136
- return documents_dir
137
-
138
- @property
139
- def user_runtime_dir(self) -> str:
140
- """
141
- :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
142
- ``$XDG_RUNTIME_DIR/$appname/$version``
143
- """
144
- path = os.environ.get("XDG_RUNTIME_DIR", "")
145
- if not path.strip():
146
- path = f"/run/user/{getuid()}"
147
- return self._append_app_name_and_version(path)
148
-
149
- @property
150
- def site_data_path(self) -> Path:
151
- """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
152
- return self._first_item_as_path_if_multipath(self.site_data_dir)
153
-
154
- @property
155
- def site_config_path(self) -> Path:
156
- """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
157
- return self._first_item_as_path_if_multipath(self.site_config_dir)
158
-
159
- @property
160
- def site_cache_path(self) -> Path:
161
- """:return: cache path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
162
- return self._first_item_as_path_if_multipath(self.site_cache_dir)
163
-
164
- def _first_item_as_path_if_multipath(self, directory: str) -> Path:
165
- if self.multipath:
166
- # If multipath is True, the first path is returned.
167
- directory = directory.split(os.pathsep)[0]
168
- return Path(directory)
169
-
170
-
171
- def _get_user_dirs_folder(key: str) -> str | None:
172
- """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
173
- user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
174
- if os.path.exists(user_dirs_config_path):
175
- parser = ConfigParser()
176
-
177
- with open(user_dirs_config_path) as stream:
178
- # Add fake section header, so ConfigParser doesn't complain
179
- parser.read_string(f"[top]\n{stream.read()}")
180
-
181
- if key not in parser["top"]:
182
- return None
183
-
184
- path = parser["top"][key].strip('"')
185
- # Handle relative home paths
186
- path = path.replace("$HOME", os.path.expanduser("~"))
187
- return path
188
-
189
- return None
190
-
191
-
192
- __all__ = [
193
- "Unix",
194
- ]
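The deleted `unix.py` above is the vendored `platformdirs` backend that resolves XDG base directories, falling back to the spec defaults when the `XDG_*` environment variables are unset. A quick sketch of how those paths surface through the public API (the standalone `platformdirs` package exposes the same functions as this vendored copy; the app/author names are hypothetical):

```python
# Resolve per-user XDG directories via the public platformdirs API.
from platformdirs import user_data_dir, user_config_dir, user_cache_dir

app, author = "MyApp", "MyOrg"        # hypothetical names for illustration
print(user_data_dir(app, author))     # e.g. ~/.local/share/MyApp or $XDG_DATA_HOME/MyApp
print(user_config_dir(app, author))   # e.g. ~/.config/MyApp or $XDG_CONFIG_HOME/MyApp
print(user_cache_dir(app, author))    # e.g. ~/.cache/MyApp or $XDG_CACHE_HOME/MyApp
```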
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py DELETED
@@ -1,802 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- import abc
6
- import functools
7
- import itertools
8
- import re
9
- import warnings
10
- from typing import (
11
- Callable,
12
- Dict,
13
- Iterable,
14
- Iterator,
15
- List,
16
- Optional,
17
- Pattern,
18
- Set,
19
- Tuple,
20
- TypeVar,
21
- Union,
22
- )
23
-
24
- from .utils import canonicalize_version
25
- from .version import LegacyVersion, Version, parse
26
-
27
- ParsedVersion = Union[Version, LegacyVersion]
28
- UnparsedVersion = Union[Version, LegacyVersion, str]
29
- VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
30
- CallableOperator = Callable[[ParsedVersion, str], bool]
31
-
32
-
33
- class InvalidSpecifier(ValueError):
34
- """
35
- An invalid specifier was found, users should refer to PEP 440.
36
- """
37
-
38
-
39
- class BaseSpecifier(metaclass=abc.ABCMeta):
40
- @abc.abstractmethod
41
- def __str__(self) -> str:
42
- """
43
- Returns the str representation of this Specifier like object. This
44
- should be representative of the Specifier itself.
45
- """
46
-
47
- @abc.abstractmethod
48
- def __hash__(self) -> int:
49
- """
50
- Returns a hash value for this Specifier like object.
51
- """
52
-
53
- @abc.abstractmethod
54
- def __eq__(self, other: object) -> bool:
55
- """
56
- Returns a boolean representing whether or not the two Specifier like
57
- objects are equal.
58
- """
59
-
60
- @abc.abstractproperty
61
- def prereleases(self) -> Optional[bool]:
62
- """
63
- Returns whether or not pre-releases as a whole are allowed by this
64
- specifier.
65
- """
66
-
67
- @prereleases.setter
68
- def prereleases(self, value: bool) -> None:
69
- """
70
- Sets whether or not pre-releases as a whole are allowed by this
71
- specifier.
72
- """
73
-
74
- @abc.abstractmethod
75
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
76
- """
77
- Determines if the given item is contained within this specifier.
78
- """
79
-
80
- @abc.abstractmethod
81
- def filter(
82
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
83
- ) -> Iterable[VersionTypeVar]:
84
- """
85
- Takes an iterable of items and filters them so that only items which
86
- are contained within this specifier are allowed in it.
87
- """
88
-
89
-
90
- class _IndividualSpecifier(BaseSpecifier):
91
-
92
- _operators: Dict[str, str] = {}
93
- _regex: Pattern[str]
94
-
95
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
96
- match = self._regex.search(spec)
97
- if not match:
98
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
99
-
100
- self._spec: Tuple[str, str] = (
101
- match.group("operator").strip(),
102
- match.group("version").strip(),
103
- )
104
-
105
- # Store whether or not this Specifier should accept prereleases
106
- self._prereleases = prereleases
107
-
108
- def __repr__(self) -> str:
109
- pre = (
110
- f", prereleases={self.prereleases!r}"
111
- if self._prereleases is not None
112
- else ""
113
- )
114
-
115
- return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
116
-
117
- def __str__(self) -> str:
118
- return "{}{}".format(*self._spec)
119
-
120
- @property
121
- def _canonical_spec(self) -> Tuple[str, str]:
122
- return self._spec[0], canonicalize_version(self._spec[1])
123
-
124
- def __hash__(self) -> int:
125
- return hash(self._canonical_spec)
126
-
127
- def __eq__(self, other: object) -> bool:
128
- if isinstance(other, str):
129
- try:
130
- other = self.__class__(str(other))
131
- except InvalidSpecifier:
132
- return NotImplemented
133
- elif not isinstance(other, self.__class__):
134
- return NotImplemented
135
-
136
- return self._canonical_spec == other._canonical_spec
137
-
138
- def _get_operator(self, op: str) -> CallableOperator:
139
- operator_callable: CallableOperator = getattr(
140
- self, f"_compare_{self._operators[op]}"
141
- )
142
- return operator_callable
143
-
144
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
145
- if not isinstance(version, (LegacyVersion, Version)):
146
- version = parse(version)
147
- return version
148
-
149
- @property
150
- def operator(self) -> str:
151
- return self._spec[0]
152
-
153
- @property
154
- def version(self) -> str:
155
- return self._spec[1]
156
-
157
- @property
158
- def prereleases(self) -> Optional[bool]:
159
- return self._prereleases
160
-
161
- @prereleases.setter
162
- def prereleases(self, value: bool) -> None:
163
- self._prereleases = value
164
-
165
- def __contains__(self, item: str) -> bool:
166
- return self.contains(item)
167
-
168
- def contains(
169
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
170
- ) -> bool:
171
-
172
- # Determine if prereleases are to be allowed or not.
173
- if prereleases is None:
174
- prereleases = self.prereleases
175
-
176
- # Normalize item to a Version or LegacyVersion, this allows us to have
177
- # a shortcut for ``"2.0" in Specifier(">=2")
178
- normalized_item = self._coerce_version(item)
179
-
180
- # Determine if we should be supporting prereleases in this specifier
181
- # or not, if we do not support prereleases than we can short circuit
182
- # logic if this version is a prereleases.
183
- if normalized_item.is_prerelease and not prereleases:
184
- return False
185
-
186
- # Actually do the comparison to determine if this item is contained
187
- # within this Specifier or not.
188
- operator_callable: CallableOperator = self._get_operator(self.operator)
189
- return operator_callable(normalized_item, self.version)
190
-
191
- def filter(
192
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
193
- ) -> Iterable[VersionTypeVar]:
194
-
195
- yielded = False
196
- found_prereleases = []
197
-
198
- kw = {"prereleases": prereleases if prereleases is not None else True}
199
-
200
- # Attempt to iterate over all the values in the iterable and if any of
201
- # them match, yield them.
202
- for version in iterable:
203
- parsed_version = self._coerce_version(version)
204
-
205
- if self.contains(parsed_version, **kw):
206
- # If our version is a prerelease, and we were not set to allow
207
- # prereleases, then we'll store it for later in case nothing
208
- # else matches this specifier.
209
- if parsed_version.is_prerelease and not (
210
- prereleases or self.prereleases
211
- ):
212
- found_prereleases.append(version)
213
- # Either this is not a prerelease, or we should have been
214
- # accepting prereleases from the beginning.
215
- else:
216
- yielded = True
217
- yield version
218
-
219
- # Now that we've iterated over everything, determine if we've yielded
220
- # any values, and if we have not and we have any prereleases stored up
221
- # then we will go ahead and yield the prereleases.
222
- if not yielded and found_prereleases:
223
- for version in found_prereleases:
224
- yield version
225
-
226
-
227
- class LegacySpecifier(_IndividualSpecifier):
228
-
229
- _regex_str = r"""
230
- (?P<operator>(==|!=|<=|>=|<|>))
231
- \s*
232
- (?P<version>
233
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
234
- # string can be just about anything, we match everything
235
- # except for whitespace, a semi-colon for marker support,
236
- # a closing paren since versions can be enclosed in
237
- # them, and a comma since it's a version separator.
238
- )
239
- """
240
-
241
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
242
-
243
- _operators = {
244
- "==": "equal",
245
- "!=": "not_equal",
246
- "<=": "less_than_equal",
247
- ">=": "greater_than_equal",
248
- "<": "less_than",
249
- ">": "greater_than",
250
- }
251
-
252
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
253
- super().__init__(spec, prereleases)
254
-
255
- warnings.warn(
256
- "Creating a LegacyVersion has been deprecated and will be "
257
- "removed in the next major release",
258
- DeprecationWarning,
259
- )
260
-
261
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
262
- if not isinstance(version, LegacyVersion):
263
- version = LegacyVersion(str(version))
264
- return version
265
-
266
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
267
- return prospective == self._coerce_version(spec)
268
-
269
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
270
- return prospective != self._coerce_version(spec)
271
-
272
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
273
- return prospective <= self._coerce_version(spec)
274
-
275
- def _compare_greater_than_equal(
276
- self, prospective: LegacyVersion, spec: str
277
- ) -> bool:
278
- return prospective >= self._coerce_version(spec)
279
-
280
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
281
- return prospective < self._coerce_version(spec)
282
-
283
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
284
- return prospective > self._coerce_version(spec)
285
-
286
-
287
- def _require_version_compare(
288
- fn: Callable[["Specifier", ParsedVersion, str], bool]
289
- ) -> Callable[["Specifier", ParsedVersion, str], bool]:
290
- @functools.wraps(fn)
291
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
292
- if not isinstance(prospective, Version):
293
- return False
294
- return fn(self, prospective, spec)
295
-
296
- return wrapped
297
-
298
-
299
- class Specifier(_IndividualSpecifier):
300
-
301
- _regex_str = r"""
302
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
303
- (?P<version>
304
- (?:
305
- # The identity operators allow for an escape hatch that will
306
- # do an exact string match of the version you wish to install.
307
- # This will not be parsed by PEP 440 and we cannot determine
308
- # any semantic meaning from it. This operator is discouraged
309
- # but included entirely as an escape hatch.
310
- (?<====) # Only match for the identity operator
311
- \s*
312
- [^\s]* # We just match everything, except for whitespace
313
- # since we are only testing for strict identity.
314
- )
315
- |
316
- (?:
317
- # The (non)equality operators allow for wild card and local
318
- # versions to be specified so we have to define these two
319
- # operators separately to enable that.
320
- (?<===|!=) # Only match for equals and not equals
321
-
322
- \s*
323
- v?
324
- (?:[0-9]+!)? # epoch
325
- [0-9]+(?:\.[0-9]+)* # release
326
- (?: # pre release
327
- [-_\.]?
328
- (a|b|c|rc|alpha|beta|pre|preview)
329
- [-_\.]?
330
- [0-9]*
331
- )?
332
- (?: # post release
333
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
334
- )?
335
-
336
- # You cannot use a wild card and a dev or local version
337
- # together so group them with a | and make them optional.
338
- (?:
339
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
340
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
341
- |
342
- \.\* # Wild card syntax of .*
343
- )?
344
- )
345
- |
346
- (?:
347
- # The compatible operator requires at least two digits in the
348
- # release segment.
349
- (?<=~=) # Only match for the compatible operator
350
-
351
- \s*
352
- v?
353
- (?:[0-9]+!)? # epoch
354
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
355
- (?: # pre release
356
- [-_\.]?
357
- (a|b|c|rc|alpha|beta|pre|preview)
358
- [-_\.]?
359
- [0-9]*
360
- )?
361
- (?: # post release
362
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
363
- )?
364
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
365
- )
366
- |
367
- (?:
368
- # All other operators only allow a sub set of what the
369
- # (non)equality operators do. Specifically they do not allow
370
- # local versions to be specified nor do they allow the prefix
371
- # matching wild cards.
372
- (?<!==|!=|~=) # We have special cases for these
373
- # operators so we want to make sure they
374
- # don't match here.
375
-
376
- \s*
377
- v?
378
- (?:[0-9]+!)? # epoch
379
- [0-9]+(?:\.[0-9]+)* # release
380
- (?: # pre release
381
- [-_\.]?
382
- (a|b|c|rc|alpha|beta|pre|preview)
383
- [-_\.]?
384
- [0-9]*
385
- )?
386
- (?: # post release
387
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
388
- )?
389
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
390
- )
391
- )
392
- """
393
-
394
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
395
-
396
- _operators = {
397
- "~=": "compatible",
398
- "==": "equal",
399
- "!=": "not_equal",
400
- "<=": "less_than_equal",
401
- ">=": "greater_than_equal",
402
- "<": "less_than",
403
- ">": "greater_than",
404
- "===": "arbitrary",
405
- }
406
-
407
- @_require_version_compare
408
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
409
-
410
- # Compatible releases have an equivalent combination of >= and ==. That
411
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
412
- # implement this in terms of the other specifiers instead of
413
- # implementing it ourselves. The only thing we need to do is construct
414
- # the other specifiers.
415
-
416
- # We want everything but the last item in the version, but we want to
417
- # ignore suffix segments.
418
- prefix = ".".join(
419
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
420
- )
421
-
422
- # Add the prefix notation to the end of our string
423
- prefix += ".*"
424
-
425
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
426
- prospective, prefix
427
- )
428
-
429
- @_require_version_compare
430
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
431
-
432
- # We need special logic to handle prefix matching
433
- if spec.endswith(".*"):
434
- # In the case of prefix matching we want to ignore local segment.
435
- prospective = Version(prospective.public)
436
- # Split the spec out by dots, and pretend that there is an implicit
437
- # dot in between a release segment and a pre-release segment.
438
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
439
-
440
- # Split the prospective version out by dots, and pretend that there
441
- # is an implicit dot in between a release segment and a pre-release
442
- # segment.
443
- split_prospective = _version_split(str(prospective))
444
-
445
- # Shorten the prospective version to be the same length as the spec
446
- # so that we can determine if the specifier is a prefix of the
447
- # prospective version or not.
448
- shortened_prospective = split_prospective[: len(split_spec)]
449
-
450
- # Pad out our two sides with zeros so that they both equal the same
451
- # length.
452
- padded_spec, padded_prospective = _pad_version(
453
- split_spec, shortened_prospective
454
- )
455
-
456
- return padded_prospective == padded_spec
457
- else:
458
- # Convert our spec string into a Version
459
- spec_version = Version(spec)
460
-
461
- # If the specifier does not have a local segment, then we want to
462
- # act as if the prospective version also does not have a local
463
- # segment.
464
- if not spec_version.local:
465
- prospective = Version(prospective.public)
466
-
467
- return prospective == spec_version
468
-
469
- @_require_version_compare
470
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
471
- return not self._compare_equal(prospective, spec)
472
-
473
- @_require_version_compare
474
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
475
-
476
- # NB: Local version identifiers are NOT permitted in the version
477
- # specifier, so local version labels can be universally removed from
478
- # the prospective version.
479
- return Version(prospective.public) <= Version(spec)
480
-
481
- @_require_version_compare
482
- def _compare_greater_than_equal(
483
- self, prospective: ParsedVersion, spec: str
484
- ) -> bool:
485
-
486
- # NB: Local version identifiers are NOT permitted in the version
487
- # specifier, so local version labels can be universally removed from
488
- # the prospective version.
489
- return Version(prospective.public) >= Version(spec)
490
-
491
- @_require_version_compare
492
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
493
-
494
- # Convert our spec to a Version instance, since we'll want to work with
495
- # it as a version.
496
- spec = Version(spec_str)
497
-
498
- # Check to see if the prospective version is less than the spec
499
- # version. If it's not we can short circuit and just return False now
500
- # instead of doing extra unneeded work.
501
- if not prospective < spec:
502
- return False
503
-
504
- # This special case is here so that, unless the specifier itself
505
- # includes a pre-release version, we do not accept pre-release
506
- # versions for the version mentioned in the specifier (e.g. <3.1 should
507
- # not match 3.1.dev0, but should match 3.0.dev0).
508
- if not spec.is_prerelease and prospective.is_prerelease:
509
- if Version(prospective.base_version) == Version(spec.base_version):
510
- return False
511
-
512
- # If we've gotten to here, it means that prospective version is both
513
- # less than the spec version *and* it's not a pre-release of the same
514
- # version in the spec.
515
- return True
516
-
517
- @_require_version_compare
518
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
519
-
520
- # Convert our spec to a Version instance, since we'll want to work with
521
- # it as a version.
522
- spec = Version(spec_str)
523
-
524
- # Check to see if the prospective version is greater than the spec
525
- # version. If it's not we can short circuit and just return False now
526
- # instead of doing extra unneeded work.
527
- if not prospective > spec:
528
- return False
529
-
530
- # This special case is here so that, unless the specifier itself
531
- # includes a post-release version, we do not accept
532
- # post-release versions for the version mentioned in the specifier
533
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
534
- if not spec.is_postrelease and prospective.is_postrelease:
535
- if Version(prospective.base_version) == Version(spec.base_version):
536
- return False
537
-
538
- # Ensure that we do not allow a local version of the version mentioned
539
- # in the specifier, which is technically greater than, to match.
540
- if prospective.local is not None:
541
- if Version(prospective.base_version) == Version(spec.base_version):
542
- return False
543
-
544
- # If we've gotten to here, it means that prospective version is both
545
- # greater than the spec version *and* it's not a pre-release of the
546
- # same version in the spec.
547
- return True
548
-
549
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
550
- return str(prospective).lower() == str(spec).lower()
551
-
552
- @property
553
- def prereleases(self) -> bool:
554
-
555
- # If there is an explicit prereleases set for this, then we'll just
556
- # blindly use that.
557
- if self._prereleases is not None:
558
- return self._prereleases
559
-
560
- # Look at all of our specifiers and determine if they are inclusive
561
- # operators, and if they are if they are including an explicit
562
- # prerelease.
563
- operator, version = self._spec
564
- if operator in ["==", ">=", "<=", "~=", "==="]:
565
- # The == specifier can include a trailing .*, if it does we
566
- # want to remove before parsing.
567
- if operator == "==" and version.endswith(".*"):
568
- version = version[:-2]
569
-
570
- # Parse the version, and if it is a pre-release than this
571
- # specifier allows pre-releases.
572
- if parse(version).is_prerelease:
573
- return True
574
-
575
- return False
576
-
577
- @prereleases.setter
578
- def prereleases(self, value: bool) -> None:
579
- self._prereleases = value
580
-
581
-
582
- _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
583
-
584
-
585
- def _version_split(version: str) -> List[str]:
586
- result: List[str] = []
587
- for item in version.split("."):
588
- match = _prefix_regex.search(item)
589
- if match:
590
- result.extend(match.groups())
591
- else:
592
- result.append(item)
593
- return result
594
-
595
-
596
- def _is_not_suffix(segment: str) -> bool:
597
- return not any(
598
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
599
- )
600
-
601
-
602
- def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
603
- left_split, right_split = [], []
604
-
605
- # Get the release segment of our versions
606
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
607
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
608
-
609
- # Get the rest of our versions
610
- left_split.append(left[len(left_split[0]) :])
611
- right_split.append(right[len(right_split[0]) :])
612
-
613
- # Insert our padding
614
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
615
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
616
-
617
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
618
-
619
-
620
- class SpecifierSet(BaseSpecifier):
621
- def __init__(
622
- self, specifiers: str = "", prereleases: Optional[bool] = None
623
- ) -> None:
624
-
625
- # Split on "," to break each individual specifier into its own item, and
626
- # strip each item to remove leading/trailing whitespace.
627
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
628
-
629
- # Parse each individual specifier, attempting first to make it a
630
- # Specifier and falling back to a LegacySpecifier.
631
- parsed: Set[_IndividualSpecifier] = set()
632
- for specifier in split_specifiers:
633
- try:
634
- parsed.add(Specifier(specifier))
635
- except InvalidSpecifier:
636
- parsed.add(LegacySpecifier(specifier))
637
-
638
- # Turn our parsed specifiers into a frozen set and save them for later.
639
- self._specs = frozenset(parsed)
640
-
641
- # Store our prereleases value so we can use it later to determine if
642
- # we accept prereleases or not.
643
- self._prereleases = prereleases
644
-
645
- def __repr__(self) -> str:
646
- pre = (
647
- f", prereleases={self.prereleases!r}"
648
- if self._prereleases is not None
649
- else ""
650
- )
651
-
652
- return f"<SpecifierSet({str(self)!r}{pre})>"
653
-
654
- def __str__(self) -> str:
655
- return ",".join(sorted(str(s) for s in self._specs))
656
-
657
- def __hash__(self) -> int:
658
- return hash(self._specs)
659
-
660
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
661
- if isinstance(other, str):
662
- other = SpecifierSet(other)
663
- elif not isinstance(other, SpecifierSet):
664
- return NotImplemented
665
-
666
- specifier = SpecifierSet()
667
- specifier._specs = frozenset(self._specs | other._specs)
668
-
669
- if self._prereleases is None and other._prereleases is not None:
670
- specifier._prereleases = other._prereleases
671
- elif self._prereleases is not None and other._prereleases is None:
672
- specifier._prereleases = self._prereleases
673
- elif self._prereleases == other._prereleases:
674
- specifier._prereleases = self._prereleases
675
- else:
676
- raise ValueError(
677
- "Cannot combine SpecifierSets with True and False prerelease "
678
- "overrides."
679
- )
680
-
681
- return specifier
682
-
683
- def __eq__(self, other: object) -> bool:
684
- if isinstance(other, (str, _IndividualSpecifier)):
685
- other = SpecifierSet(str(other))
686
- elif not isinstance(other, SpecifierSet):
687
- return NotImplemented
688
-
689
- return self._specs == other._specs
690
-
691
- def __len__(self) -> int:
692
- return len(self._specs)
693
-
694
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
695
- return iter(self._specs)
696
-
697
- @property
698
- def prereleases(self) -> Optional[bool]:
699
-
700
- # If we have been given an explicit prerelease modifier, then we'll
701
- # pass that through here.
702
- if self._prereleases is not None:
703
- return self._prereleases
704
-
705
- # If we don't have any specifiers, and we don't have a forced value,
706
- # then we'll just return None since we don't know if this should have
707
- # pre-releases or not.
708
- if not self._specs:
709
- return None
710
-
711
- # Otherwise we'll see if any of the given specifiers accept
712
- # prereleases, if any of them do we'll return True, otherwise False.
713
- return any(s.prereleases for s in self._specs)
714
-
715
- @prereleases.setter
716
- def prereleases(self, value: bool) -> None:
717
- self._prereleases = value
718
-
719
- def __contains__(self, item: UnparsedVersion) -> bool:
720
- return self.contains(item)
721
-
722
- def contains(
723
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
724
- ) -> bool:
725
-
726
- # Ensure that our item is a Version or LegacyVersion instance.
727
- if not isinstance(item, (LegacyVersion, Version)):
728
- item = parse(item)
729
-
730
- # Determine if we're forcing a prerelease or not, if we're not forcing
731
- # one for this particular filter call, then we'll use whatever the
732
- # SpecifierSet thinks for whether or not we should support prereleases.
733
- if prereleases is None:
734
- prereleases = self.prereleases
735
-
736
- # We can determine if we're going to allow pre-releases by looking to
737
- # see if any of the underlying items support them. If none of them do
738
- # and this item is a pre-release then we do not allow it and we can
739
- # short circuit that here.
740
- # Note: This means that 1.0.dev1 would not be contained in something
741
- # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
742
- if not prereleases and item.is_prerelease:
743
- return False
744
-
745
- # We simply dispatch to the underlying specs here to make sure that the
746
- # given version is contained within all of them.
747
- # Note: This use of all() here means that an empty set of specifiers
748
- # will always return True, this is an explicit design decision.
749
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
750
-
751
- def filter(
752
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
753
- ) -> Iterable[VersionTypeVar]:
754
-
755
- # Determine if we're forcing a prerelease or not, if we're not forcing
756
- # one for this particular filter call, then we'll use whatever the
757
- # SpecifierSet thinks for whether or not we should support prereleases.
758
- if prereleases is None:
759
- prereleases = self.prereleases
760
-
761
- # If we have any specifiers, then we want to wrap our iterable in the
762
- # filter method for each one, this will act as a logical AND amongst
763
- # each specifier.
764
- if self._specs:
765
- for spec in self._specs:
766
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
767
- return iterable
768
- # If we do not have any specifiers, then we need to have a rough filter
769
- # which will filter out any pre-releases, unless there are no final
770
- # releases, and which will filter out LegacyVersion in general.
771
- else:
772
- filtered: List[VersionTypeVar] = []
773
- found_prereleases: List[VersionTypeVar] = []
774
-
775
- item: UnparsedVersion
776
- parsed_version: Union[Version, LegacyVersion]
777
-
778
- for item in iterable:
779
- # Ensure that we have some kind of Version class for this item.
780
- if not isinstance(item, (LegacyVersion, Version)):
781
- parsed_version = parse(item)
782
- else:
783
- parsed_version = item
784
-
785
- # Filter out any item which is parsed as a LegacyVersion
786
- if isinstance(parsed_version, LegacyVersion):
787
- continue
788
-
789
- # Store any item which is a pre-release for later unless we've
790
- # already found a final version or we are accepting prereleases
791
- if parsed_version.is_prerelease and not prereleases:
792
- if not filtered:
793
- found_prereleases.append(item)
794
- else:
795
- filtered.append(item)
796
-
797
- # If we've found no items except for pre-releases, then we'll go
798
- # ahead and use the pre-releases
799
- if not filtered and found_prereleases and prereleases is None:
800
- return found_prereleases
801
-
802
- return filtered
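For reference, a short usage sketch of the specifier API deleted above. The behaviour shown (the ~= compatible operator expanding to a >= plus ==prefix.* pair, and filter() acting as a logical AND that only falls back to pre-releases when nothing final matches) follows directly from the code; the plain packaging import path is an assumption, since vendored copies of this module live under different package prefixes.

# Usage sketch for the deleted specifiers module (packaging < 22, which still ships LegacyVersion).
from packaging.specifiers import Specifier, SpecifierSet

spec = Specifier("~=2.2")                      # compatible release: equivalent to >=2.2, ==2.*
assert spec.contains("2.3") and not spec.contains("3.0")

specs = SpecifierSet(">=1.0,<2.0")
# Pre-releases such as 1.6a1 are dropped because a final release already satisfies the set.
assert list(specs.filter(["1.0", "1.5", "2.0", "1.6a1"])) == ["1.0", "1.5"]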
 
spaces/Bijoy2001/real-time-voice-recognition/app.py DELETED
@@ -1,20 +0,0 @@
1
-
2
- import gradio as gr
3
- import time
4
- def transcribe(audio, state=" "):
5
- """Speech-to-text function using the ASR pipeline `p`."""
6
- time.sleep(3)
7
- text = p(audio)["text"]
8
- state += text + " "
9
- return state, state
10
- gr.Interface(
11
- fn=transcribe,
12
- inputs=[
13
- gr.inputs.Audio(source="microphone", type="filepath"),
14
- "state"
15
- ],
16
- outputs=[
17
- "textbox",
18
- "state"
19
- ],
20
- live=True).launch()
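Note that this app calls a pipeline `p` that is never defined in the file, so the Space cannot run as deleted. A minimal sketch of the missing setup, assuming a standard Hugging Face ASR pipeline (the model name is an illustrative assumption, not taken from the original):

# Hypothetical definition of the `p` used inside transcribe().
from transformers import pipeline

p = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")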
 
spaces/BilalSardar/YoutubeVideoLink-To-MCQs-Generation/app.py DELETED
@@ -1,320 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from pathlib import Path
4
- from pydub import AudioSegment
5
- from pydub.utils import make_chunks
6
- import pytube
7
- import gensim
8
- from gensim.test.utils import datapath, get_tmpfile
9
- from gensim.scripts.glove2word2vec import glove2word2vec
10
- from gensim.models import KeyedVectors
11
- import torch
12
- import warnings
13
- import speech_recognition as sr
14
- from transformers import T5ForConditionalGeneration,T5Tokenizer
15
- import nltk
16
- from flashtext import KeywordProcessor
17
- from collections import OrderedDict
18
- from sklearn.metrics.pairwise import cosine_similarity
19
-
20
- nltk.download('punkt')
21
- nltk.download('brown')
22
- nltk.download('wordnet')
23
- nltk.download('stopwords')
24
- from nltk.corpus import wordnet as wn
25
- from nltk.tokenize import sent_tokenize
26
- from textwrap3 import wrap
27
- import random
28
- import numpy as np
29
- from nltk.corpus import stopwords
30
- import string
31
- import pke
32
- import traceback
33
- import spacy
34
-
35
-
36
- warnings.filterwarnings("ignore")
37
- def download_youtube(url, choice, res):
38
-
39
- yt = pytube.YouTube(url)
40
-
41
- if choice == 'mp3':
42
- audio = yt.streams.filter(only_audio=True).first()
43
- print(f"Downloading {audio.title} as MP3")
44
- return audio.download()
45
-
46
- elif choice == 'mp4':
47
- if res == "720p":
48
- video = yt.streams.filter(res="720p").first()
49
- elif res == "1080p":
50
- video = yt.streams.filter(res="1080p").first()
51
- elif res == "2160p":
52
- video = yt.streams.filter(res="2160p").first()
53
- else:
54
- return "Invalid resolution"
55
-
56
- print(f"Downloading {video.title} at {video.resolution}")
57
- return video.download()
58
-
59
- else:
60
- return "Invalid choice"
61
- def Process_audio(fileName):
62
- text=''
63
- txtf=open("The_audio.txt","w+")
64
- myaudio=AudioSegment.from_wav(fileName)
65
- chunks_length_ms=8000
66
- chunks=make_chunks(myaudio,chunks_length_ms)
67
- for i, chunk in enumerate(chunks):
68
- chunkName='./chunked/'+fileName+"_{0}.wav".format(i)
69
- print("I am Exporting",chunkName)
70
- chunk.export(chunkName,format="wav")
71
- File=chunkName
72
- r= sr.Recognizer()
73
- with sr.AudioFile(File) as source:
74
- audio_listened=r.listen(source)
75
-
76
- try:
77
- rec=r.recognize_google(audio_listened)
78
- txtf.write(rec+".")
79
- text+=rec+"."
80
- except sr.UnknownValueError:
81
- print("I dont recognize your audio")
82
- except sr.RequestError as e:
83
- print("could not get result")
84
- return text
85
- try:
86
- os.makedirs("chunked")
87
- except:
88
- pass
89
-
90
- def UrlToAudio(VideoUrl):
91
- url=VideoUrl
92
- #os.system("yt-dlp -x --audio-format wav " + url)
93
- download_youtube(VideoUrl,"mp3","")
94
- # load audio and pad/trim it to fit 30 seconds
95
- base_path = Path(r"")
96
- for wav_file_path in base_path.glob("*.wav"):
97
- Process_audio(str(wav_file_path))
98
- break
99
-
100
-
101
- summary_model = T5ForConditionalGeneration.from_pretrained('t5-base')
102
- summary_tokenizer = T5Tokenizer.from_pretrained('t5-base')
103
-
104
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
105
- summary_model = summary_model.to(device)
106
-
107
-
108
- def set_seed(seed: int):
109
- random.seed(seed)
110
- np.random.seed(seed)
111
- torch.manual_seed(seed)
112
- torch.cuda.manual_seed_all(seed)
113
-
114
- def postprocesstext (content):
115
- final=""
116
- for sent in sent_tokenize(content):
117
- sent = sent.capitalize()
118
- final = final +" "+sent
119
- return final
120
-
121
-
122
- def summarizer(text,model,tokenizer):
123
- text = text.strip().replace("\n"," ")
124
- text = "summarize: "+text
125
- # print (text)
126
- max_len = 512
127
- encoding = tokenizer.encode_plus(text,max_length=max_len, pad_to_max_length=False,truncation=True, return_tensors="pt").to(device)
128
-
129
- input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
130
-
131
- outs = model.generate(input_ids=input_ids,
132
- attention_mask=attention_mask,
133
- early_stopping=True,
134
- num_beams=3,
135
- num_return_sequences=1,
136
- no_repeat_ngram_size=2,
137
- min_length = 75,
138
- max_length=300)
139
-
140
-
141
- dec = [tokenizer.decode(ids,skip_special_tokens=True) for ids in outs]
142
- summary = dec[0]
143
- summary = postprocesstext(summary)
144
- summary= summary.strip()
145
-
146
- return summary
147
-
148
-
149
- def get_nouns_multipartite(content):
150
- out=[]
151
- try:
152
- extractor = pke.unsupervised.MultipartiteRank()
153
-
154
- # Select only nouns and proper nouns that do not contain punctuation marks or stopwords as candidates.
155
- pos = {'PROPN','NOUN'}
156
- #pos = {'PROPN','NOUN'}
157
- stoplist = list(string.punctuation)
158
- stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']
159
- stoplist += stopwords.words('english')
160
-
161
- extractor.load_document(input=content,language='en',
162
- stoplist=stoplist,
163
- normalization=None)
164
-
165
- extractor.candidate_selection(pos=pos)
166
- # 4. build the Multipartite graph and rank candidates using random walk,
167
- # alpha controls the weight adjustment mechanism, see TopicRank for
168
- # threshold/method parameters.
169
- extractor.candidate_weighting(alpha=1.1,
170
- threshold=0.75,
171
- method='average')
172
- keyphrases = extractor.get_n_best(n=15)
173
-
174
-
175
- for val in keyphrases:
176
- out.append(val[0])
177
- except:
178
- out = []
179
- traceback.print_exc()
180
-
181
- return out
182
-
183
- def get_keywords(originaltext,summarytext):
184
- keywords = get_nouns_multipartite(originaltext)
185
- print ("keywords unsummarized: ",keywords)
186
- keyword_processor = KeywordProcessor()
187
- for keyword in keywords:
188
- keyword_processor.add_keyword(keyword)
189
-
190
- keywords_found = keyword_processor.extract_keywords(summarytext)
191
- keywords_found = list(set(keywords_found))
192
- print ("keywords_found in summarized: ",keywords_found)
193
-
194
- important_keywords =[]
195
- for keyword in keywords:
196
- if keyword in keywords_found:
197
- important_keywords.append(keyword)
198
-
199
- return important_keywords[:4]
200
-
201
- question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1')
202
- question_tokenizer = T5Tokenizer.from_pretrained('ramsrigouthamg/t5_squad_v1')
203
- question_model = question_model.to(device)
204
-
205
- def get_question(context,answer,model,tokenizer):
206
- text = "context: {} answer: {}".format(context,answer)
207
- encoding = tokenizer.encode_plus(text,max_length=384, pad_to_max_length=False,truncation=True, return_tensors="pt").to(device)
208
- input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
209
-
210
- outs = model.generate(input_ids=input_ids,
211
- attention_mask=attention_mask,
212
- early_stopping=True,
213
- num_beams=5,
214
- num_return_sequences=1,
215
- no_repeat_ngram_size=2,
216
- max_length=72)
217
-
218
-
219
- dec = [tokenizer.decode(ids,skip_special_tokens=True) for ids in outs]
220
-
221
-
222
- Question = dec[0].replace("question:","")
223
- Question= Question.strip()
224
- return Question
225
- def get_distractors_wordnet(word):
226
- distractors=[]
227
- try:
228
- syn = wn.synsets(word,'n')[0]
229
-
230
- word= word.lower()
231
- orig_word = word
232
- if len(word.split())>0:
233
- word = word.replace(" ","_")
234
- hypernym = syn.hypernyms()
235
- if len(hypernym) == 0:
236
- return distractors
237
- for item in hypernym[0].hyponyms():
238
- name = item.lemmas()[0].name()
239
- #print ("name ",name, " word",orig_word)
240
- if name == orig_word:
241
- continue
242
- name = name.replace("_"," ")
243
- name = " ".join(w.capitalize() for w in name.split())
244
- if name is not None and name not in distractors:
245
- distractors.append(name)
246
- except:
247
- print ("Wordnet distractors not found")
248
- return distractors
249
-
250
- glove_file = '/home/user/app/glove.6B.300d.txt'
251
- tmp_file = '/home/user/app/word2vec-glove.6B.300d.txt'
252
-
253
- glove2word2vec(glove_file, tmp_file)
254
- model = KeyedVectors.load_word2vec_format(tmp_file)
255
- def generate_distractors(answer, count):
256
- answer = str.lower(answer)
257
-
258
- ##Extracting closest words for the answer.
259
- try:
260
- closestWords = model.most_similar(positive=[answer], topn=count)
261
- except:
262
- #In case the word is not in the vocabulary, or other problem not loading embeddings
263
- return []
264
-
265
- #Return count many distractors
266
- distractors = list(map(lambda x: x[0], closestWords))[0:count]
267
-
268
- return distractors
269
- context1 = gr.inputs.Textbox(lines=10, placeholder="Enter link here...")
270
- output = gr.outputs.HTML( label="Question and Answers")
271
- radiobutton = gr.inputs.Radio(["Wordnet", "Gensim"])
272
-
273
- def generate_question(context1,radiobutton):
274
- # try:
275
-
276
- f = open("The_audio.txt", "r")
277
- context = f.read()
278
- summary_text = summarizer(context,summary_model,summary_tokenizer)
279
- for wrp in wrap(summary_text, 150):
280
- print (wrp)
281
- # np = getnounphrases(summary_text,sentence_transformer_model,3)
282
- np = get_keywords(context,summary_text)
283
- print ("\n\nNoun phrases",np)
284
- output=""
285
- for answer in np:
286
- ques = get_question(summary_text,answer,question_model,question_tokenizer)
287
- if radiobutton=="Wordnet":
288
- distractors = get_distractors_wordnet(answer)
289
- else:
290
- distractors = generate_distractors(answer.capitalize(),3)
291
- print(distractors)
292
-
293
- # output= output + ques + "\n" + "Ans: "+answer.capitalize() + "\n\n"
294
- output ="\n"+ output + "<b style='color:blue;'>" + ques + "</b>"
295
- # output = output + "<br>"
296
- output ="\n"+ output + "<b style='color:green;'>" + "Ans: " +answer.capitalize()+ "</b>"
297
- if len(distractors)>0:
298
- for distractor in distractors[:4]:
299
- output = output + " <b style='color:brown;'>" + distractor+ "</b>\n"
300
- output = output + "<br>"
301
-
302
- summary ="Summary: "+ summary_text
303
- for answer in np:
304
- summary = summary.replace(answer,"<b>"+answer+"</b>")
305
- summary = summary.replace(answer.capitalize(),"<b>"+answer.capitalize()+"</b>")
306
- output = output + "<p>"+summary+"</p>"
307
- return output
308
- # except:
309
- # return "Something Went Wrong...Please Check Link or try Again"
310
-
311
-
312
-
313
- iface = gr.Interface(
314
- fn=generate_question,
315
- inputs=[context1,radiobutton],
316
- title="VidQuest",
317
- examples=[["https://www.youtube.com/watch?v=WSbgixdC9g8","Gensim"]],
318
- description="Keep in mind that it might take a few minutes. Correct answers appear in green, while distractor choices appear in brown. Use the Gensim option to find the most appropriate distractors.",
319
- outputs=output)
320
- iface.launch(debug=True)
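For orientation, a condensed sketch of the pipeline that generate_question wires together, written against the helper functions defined in the file above (the driver itself is illustrative and not part of the original app):

# Summarize the transcript, pick answer keywords, then build one MCQ per keyword.
context = open("The_audio.txt").read()
summary = summarizer(context, summary_model, summary_tokenizer)
for answer in get_keywords(context, summary):
    question = get_question(summary, answer, question_model, question_tokenizer)
    options = [answer.capitalize()] + generate_distractors(answer, 3)  # or get_distractors_wordnet(answer)
    print(question, options)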
 
spaces/BridgeTower/bridgetower-video-search/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Bridgetower Video Search
3
- emoji: 🏃
4
- colorFrom: green
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.17.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/LIVE/pybind11/tests/test_tagbased_polymorphic.cpp DELETED
@@ -1,142 +0,0 @@
1
- /*
2
- tests/test_tagbased_polymorphic.cpp -- test of polymorphic_type_hook
3
-
4
- Copyright (c) 2018 Hudson River Trading LLC <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include <pybind11/stl.h>
12
-
13
- struct Animal
14
- {
15
- // Make this type also a "standard" polymorphic type, to confirm that
16
- // specializing polymorphic_type_hook using enable_if_t still works
17
- // (https://github.com/pybind/pybind11/pull/2016/).
18
- virtual ~Animal() = default;
19
-
20
- // Enum for tag-based polymorphism.
21
- enum class Kind {
22
- Unknown = 0,
23
- Dog = 100, Labrador, Chihuahua, LastDog = 199,
24
- Cat = 200, Panther, LastCat = 299
25
- };
26
- static const std::type_info* type_of_kind(Kind kind);
27
- static std::string name_of_kind(Kind kind);
28
-
29
- const Kind kind;
30
- const std::string name;
31
-
32
- protected:
33
- Animal(const std::string& _name, Kind _kind)
34
- : kind(_kind), name(_name)
35
- {}
36
- };
37
-
38
- struct Dog : Animal
39
- {
40
- Dog(const std::string& _name, Kind _kind = Kind::Dog) : Animal(_name, _kind) {}
41
- std::string bark() const { return name_of_kind(kind) + " " + name + " goes " + sound; }
42
- std::string sound = "WOOF!";
43
- };
44
-
45
- struct Labrador : Dog
46
- {
47
- Labrador(const std::string& _name, int _excitement = 9001)
48
- : Dog(_name, Kind::Labrador), excitement(_excitement) {}
49
- int excitement;
50
- };
51
-
52
- struct Chihuahua : Dog
53
- {
54
- Chihuahua(const std::string& _name) : Dog(_name, Kind::Chihuahua) { sound = "iyiyiyiyiyi"; }
55
- std::string bark() const { return Dog::bark() + " and runs in circles"; }
56
- };
57
-
58
- struct Cat : Animal
59
- {
60
- Cat(const std::string& _name, Kind _kind = Kind::Cat) : Animal(_name, _kind) {}
61
- std::string purr() const { return "mrowr"; }
62
- };
63
-
64
- struct Panther : Cat
65
- {
66
- Panther(const std::string& _name) : Cat(_name, Kind::Panther) {}
67
- std::string purr() const { return "mrrrRRRRRR"; }
68
- };
69
-
70
- std::vector<std::unique_ptr<Animal>> create_zoo()
71
- {
72
- std::vector<std::unique_ptr<Animal>> ret;
73
- ret.emplace_back(new Labrador("Fido", 15000));
74
-
75
- // simulate some new type of Dog that the Python bindings
76
- // haven't been updated for; it should still be considered
77
- // a Dog, not just an Animal.
78
- ret.emplace_back(new Dog("Ginger", Dog::Kind(150)));
79
-
80
- ret.emplace_back(new Chihuahua("Hertzl"));
81
- ret.emplace_back(new Cat("Tiger", Cat::Kind::Cat));
82
- ret.emplace_back(new Panther("Leo"));
83
- return ret;
84
- }
85
-
86
- const std::type_info* Animal::type_of_kind(Kind kind)
87
- {
88
- switch (kind) {
89
- case Kind::Unknown: break;
90
-
91
- case Kind::Dog: break;
92
- case Kind::Labrador: return &typeid(Labrador);
93
- case Kind::Chihuahua: return &typeid(Chihuahua);
94
- case Kind::LastDog: break;
95
-
96
- case Kind::Cat: break;
97
- case Kind::Panther: return &typeid(Panther);
98
- case Kind::LastCat: break;
99
- }
100
-
101
- if (kind >= Kind::Dog && kind <= Kind::LastDog) return &typeid(Dog);
102
- if (kind >= Kind::Cat && kind <= Kind::LastCat) return &typeid(Cat);
103
- return nullptr;
104
- }
105
-
106
- std::string Animal::name_of_kind(Kind kind)
107
- {
108
- std::string raw_name = type_of_kind(kind)->name();
109
- py::detail::clean_type_id(raw_name);
110
- return raw_name;
111
- }
112
-
113
- namespace pybind11 {
114
- template <typename itype>
115
- struct polymorphic_type_hook<itype, detail::enable_if_t<std::is_base_of<Animal, itype>::value>>
116
- {
117
- static const void *get(const itype *src, const std::type_info*& type)
118
- { type = src ? Animal::type_of_kind(src->kind) : nullptr; return src; }
119
- };
120
- }
121
-
122
- TEST_SUBMODULE(tagbased_polymorphic, m) {
123
- py::class_<Animal>(m, "Animal")
124
- .def_readonly("name", &Animal::name);
125
- py::class_<Dog, Animal>(m, "Dog")
126
- .def(py::init<std::string>())
127
- .def_readwrite("sound", &Dog::sound)
128
- .def("bark", &Dog::bark);
129
- py::class_<Labrador, Dog>(m, "Labrador")
130
- .def(py::init<std::string, int>(), "name"_a, "excitement"_a = 9001)
131
- .def_readwrite("excitement", &Labrador::excitement);
132
- py::class_<Chihuahua, Dog>(m, "Chihuahua")
133
- .def(py::init<std::string>())
134
- .def("bark", &Chihuahua::bark);
135
- py::class_<Cat, Animal>(m, "Cat")
136
- .def(py::init<std::string>())
137
- .def("purr", &Cat::purr);
138
- py::class_<Panther, Cat>(m, "Panther")
139
- .def(py::init<std::string>())
140
- .def("purr", &Panther::purr);
141
- m.def("create_zoo", &create_zoo);
142
- };
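On the Python side, the tagbased_polymorphic submodule registered above is exercised roughly like this in pybind11's companion test; the pybind11_tests import path follows the test-suite convention and is assumed here:

# Tag-based downcasting: returned Animal pointers surface as their most-derived bound type.
from pybind11_tests import tagbased_polymorphic as m

zoo = m.create_zoo()
assert isinstance(zoo[0], m.Labrador) and zoo[0].excitement == 15000
assert isinstance(zoo[1], m.Dog)        # unknown Dog kind 150 still maps to Dog, not just Animal
assert isinstance(zoo[4], m.Panther) and zoo[4].purr() == "mrrrRRRRRR"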