parquet-converter committed on
Commit 644ff07 · 1 Parent(s): ef5ebd1

Update parquet files (step 53 of 397)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/__init__.py +0 -93
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Camel audio alchemy download Create your own unique sounds with Alchemys sample import and resynthesis features.md +0 -118
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackeddll 3.1.5.0.exe Download A Simple and Effective Way to Bypass Software Protection.md +0 -134
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fences 3 Serial Key.md +0 -143
  5. spaces/1gistliPinn/ChatGPT4/Examples/Dr.Fone 9.6.2 Crack With Registration Codes Full Free Download VERIFIED.md +0 -39
  6. spaces/1phancelerku/anime-remove-background/Bike 3D Game Race Stunt and Customize Your Motorbike.md +0 -151
  7. spaces/1phancelerku/anime-remove-background/Download Mod Truckers of Europe 3 The Best Truck Simulator Game Ever.md +0 -115
  8. spaces/1phancelerku/anime-remove-background/Download Q dana APK and Enjoy Low Interest Rates and Flexible Repayment Terms.md +0 -131
  9. spaces/1phancelerku/anime-remove-background/Explore the Secrets of Evolution with Dino World Jurassic Builder 2 MOD APK.md +0 -94
  10. spaces/801artistry/RVC801/train/mel_processing.py +0 -130
  11. spaces/A00001/bingothoo/src/components/chat-history.tsx +0 -48
  12. spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/index.html +0 -113
  13. spaces/AIatUIUC/CodeLATS/lats/utils.py +0 -73
  14. spaces/AMR-KELEG/ALDi/app.py +0 -170
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_cifar.py +0 -16
  16. spaces/Ababababababbababa/poetry/README.md +0 -14
  17. spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/comet_utils.py +0 -166
  18. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/[id]/summarize/$types.d.ts +0 -9
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Myshell.py +0 -173
  20. spaces/Adithedev/Text-Summarization-Tool/app.py +0 -81
  21. spaces/Aditya9790/yolo7-object-tracking/sort.py +0 -367
  22. spaces/Ameaou/academic-chatgpt3.1/README.md +0 -300
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +0 -882
  24. spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_base/run.sh +0 -10
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnet.py +0 -663
  26. spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/analyze_logs.py +0 -179
  27. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/dmnet_r50-d8.py +0 -44
  28. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py +0 -2
  29. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/whisper_stt/readme.md +0 -15
  30. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/image_datasets.py +0 -167
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/masked_conv.py +0 -111
  32. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/test.py +0 -238
  33. spaces/ArtGAN/Diffusion-API/README.md +0 -15
  34. spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/img2img_app.py +0 -155
  35. spaces/ArtyomKhyan/Detection/models/experimental.py +0 -109
  36. spaces/AzinZ/vitscn/transforms.py +0 -193
  37. spaces/Banbri/zcvzcv/src/app/queries/mockLLMResponse.ts +0 -24
  38. spaces/Benson/text-generation/Examples/Cmo Descargar Naruto Ultimate Ninja Storm 4 Para Android.md +0 -86
  39. spaces/Benson/text-generation/Examples/Cruce De Carretera Todo Desbloqueado Apk.md +0 -90
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/build.py +0 -146
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/pyprojecttoml.py +0 -493
  42. spaces/CALM/Dashboard/streamlit_observable/frontend/build/precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js +0 -26
  43. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/deform_conv.py +0 -494
  44. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/config.py +0 -26
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/trident_conv.py +0 -107
  46. spaces/CVPR/LIVE/pydiffvg/pixel_filter.py +0 -9
  47. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/mismatch.h +0 -22
  48. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/get_value.h +0 -23
  49. spaces/CVPR/WALT/infer.py +0 -118
  50. spaces/CVPR/WALT/mmdet/core/evaluation/mean_ap.py +0 -469
spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/__init__.py DELETED
@@ -1,93 +0,0 @@
- from json import dumps, loads
- from os import getenv
- from random import randint
- from re import search
- from urllib.parse import urlencode
-
- from bard.typings import BardResponse
- from dotenv import load_dotenv
- from requests import Session
-
- # The Bard auth cookie (__Secure-1PSID) and an optional proxy come from a .env file.
- load_dotenv()
- token = getenv('1psid')
- proxy = getenv('proxy')
-
- # Bard exposes no temperature knob, so these descriptions are injected into the
- # prompt text itself to approximate one.
- temperatures = {
-     0: "Generate text strictly following known patterns, with no creativity.",
-     0.1: "Produce text adhering closely to established patterns, allowing minimal creativity.",
-     0.2: "Create text with modest deviations from familiar patterns, injecting a slight creative touch.",
-     0.3: "Craft text with a mild level of creativity, deviating somewhat from common patterns.",
-     0.4: "Formulate text balancing creativity and recognizable patterns for coherent results.",
-     0.5: "Generate text with a moderate level of creativity, allowing for a mix of familiarity and novelty.",
-     0.6: "Compose text with an increased emphasis on creativity, while partially maintaining familiar patterns.",
-     0.7: "Produce text favoring creativity over typical patterns for more original results.",
-     0.8: "Create text heavily focused on creativity, with limited concern for familiar patterns.",
-     0.9: "Craft text with a strong emphasis on unique and inventive ideas, largely ignoring established patterns.",
-     1: "Generate text with maximum creativity, disregarding any constraints of known patterns or structures."
- }
-
-
- class Completion:
-     @staticmethod
-     def create(
-             prompt: str = 'hello world',
-             temperature: float = None,
-             conversation_id: str = '',
-             response_id: str = '',
-             choice_id: str = '') -> BardResponse:
-
-         if temperature:
-             prompt = f'''settings: follow these settings for your response: [temperature: {temperature} - {temperatures[temperature]}] | prompt : {prompt}'''
-
-         client = Session()
-         client.proxies = {
-             'http': f'http://{proxy}',
-             'https': f'http://{proxy}'} if proxy else None
-
-         client.headers = {
-             'authority': 'bard.google.com',
-             'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
-             'origin': 'https://bard.google.com',
-             'referer': 'https://bard.google.com/',
-             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-             'x-same-domain': '1',
-             'cookie': f'__Secure-1PSID={token}'
-         }
-
-         # The SNlM0e token is scraped from the Bard homepage; the streaming endpoint
-         # expects it echoed back in the 'at' field.
-         snlm0e = search(r'SNlM0e\":\"(.*?)\"',
-                         client.get('https://bard.google.com/').text).group(1)
-
-         params = urlencode({
-             'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
-             '_reqid': randint(1111, 9999),
-             'rt': 'c',
-         })
-
-         response = client.post(
-             f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
-             data={
-                 'at': snlm0e,
-                 'f.req': dumps([None, dumps([
-                     [prompt],
-                     None,
-                     [conversation_id, response_id, choice_id],
-                 ])])
-             }
-         )
-
-         # The batched-RPC response carries the payload of interest on its fourth line.
-         chat_data = loads(response.content.splitlines()[3])[0][2]
-         if not chat_data:
-             print('error, retrying')
-             return Completion.create(prompt, temperature,
-                                      conversation_id, response_id, choice_id)
-
-         json_chat_data = loads(chat_data)
-         results = {
-             'content': json_chat_data[0][0],
-             'conversation_id': json_chat_data[1][0],
-             'response_id': json_chat_data[1][1],
-             'factualityQueries': json_chat_data[3],
-             'textQuery': json_chat_data[2][0] if json_chat_data[2] is not None else '',
-             'choices': [{'id': i[0], 'content': i[1]} for i in json_chat_data[4]],
-         }
-
-         return BardResponse(results)
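For context, a minimal sketch of how the deleted module above would have been driven. It assumes a .env file that defines 1psid (the Bard __Secure-1PSID cookie) and optionally proxy, and that bard.typings.BardResponse is importable; the prompt string is made up for illustration, and the endpoint this module talked to has since been retired, so treat it as documentation rather than working code.

# Hypothetical usage of the deleted module -- not part of the commit itself.
from bard import Completion

response = Completion.create(
    prompt='Explain granular synthesis in one sentence.',
    temperature=0.5,  # must be one of the keys of the `temperatures` dict
)
# `response` is a BardResponse built from the parsed payload. Note that
# temperature=0 is falsy and would be silently ignored by the `if temperature:` check.
print(response)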
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Camel audio alchemy download Create your own unique sounds with Alchemys sample import and resynthesis features.md DELETED
@@ -1,118 +0,0 @@
- <br />
- <h1>Camel Audio Alchemy Download: The Ultimate Sample Manipulation Synthesizer</h1>
- <p>If you are looking for a powerful and versatile synthesizer that can turn your musical dreams into reality, you might want to check out Camel Audio's Alchemy. Alchemy is a hybrid synth that combines additive, spectral, granular, sampling, and virtual analog synthesis in one plugin. It also features a wide range of filters, effects, modulation options, and an arpeggiator that can sync to any MIDI groove. With over 2GB of samples and 500 presets included, you will never run out of inspiration with Alchemy. In this article, we will show you how to download Alchemy for Mac or Windows, how to use its features, and why it is one of the best synths on the market.</p>
- <h2>What is Alchemy?</h2>
- <p>Alchemy is a synth that can do anything from fattening up a kick drum, to producing a great guitar sound or powerful dance bassline. It can also create lush pads, soundscapes, keys, leads, and more. It is described by Camel Audio as "the ultimate sample manipulation synthesizer". Here are some of its main features:</p>
- <h2>Camel audio alchemy download</h2><br /><p><b><b>Download Zip</b> &#9193; <a href="https://byltly.com/2uKw0M">https://byltly.com/2uKw0M</a></b></p><br /><br />
- <ul>
- <li><strong>Synthesis modes:</strong> Alchemy can use up to four sources for each sound. Each source can be either additive, spectral, granular, sampling, or virtual analog. You can mix and match different synthesis modes to create unique sounds.</li>
- <li><strong>Sources:</strong> Alchemy comes with over 2GB of samples and analysed content from various sound designers. You can also import your own samples from SFZ, WAV or AIFF files. You can use up to four samples per source and edit their start/end points, loop modes, pitch envelopes, etc.</li>
- <li><strong>Filters:</strong> Alchemy has a wide range of analog modeled filters that can be applied to each source or globally. You can choose from low-pass, high-pass, band-pass, notch, comb, formant, ring modulator, etc. You can also adjust the cutoff frequency, resonance, drive, feedback, etc.</li>
- <li><strong>Effects:</strong> Alchemy has a flexible rack of effects that can be applied to each source or globally. You can choose from distortion, compression, filter, reverb, delay, chorus, flanger, phaser, etc. You can also adjust the parameters of each effect.</li>
- <li><strong>Modulation:</strong> Alchemy has an innovative modulation system that is extremely flexible yet easy to use. You can modulate almost any parameter with up to eight modulators per source or globally. You can choose from envelopes, LFOs, MSEGs, step sequencers, performers, etc. You can also adjust the amount, rate, shape, sync, etc. of each modulator.</li>
- <li><strong>Arpeggiator:</strong> Alchemy has a powerful arpeggiator that can create complex rhythmic patterns with up to 16 steps per source or globally. You can adjust the pitch, velocity, gate, swing, etc. of each step. You can also import the groove from any MIDI file for immediate synchronization to a beat.</li>
- </ul>
- <h2>How to Download Alchemy?</h2>
- <h3>System Requirements</h3>
- <p>To run Alchemy on your computer, you need to meet the following system requirements:</p>
- <table>
- <tr><th>Component</th><th>Minimum</th><th>Recommended</th></tr>
- <tr><td>Mac OS X</td><td>10.6 or higher</td><td>10.9 or higher</td></tr>
- <tr><td>Windows</td><td>XP SP2 or higher</td><td>7 or higher</td></tr>
- <tr><td>CPU</td><td>Intel Core 2 Duo 2GHz or equivalent</td><td>Intel Core i5/i7 2GHz or higher</td></tr>
- <tr><td>RAM</td><td>1GB</td><td>4GB or more</td></tr>
- <tr><td>Disk Space</td><td>3GB</td><td>6GB or more</td></tr>
- <tr><td>Audio Interface</td><td>ASIO compatible (Windows) / Core Audio compatible (Mac)</td><td>-</td></tr>
- <tr><td>MIDI Controller</td><td>-</td><td>MIDI keyboard with knobs/faders/pads (optional)</td></tr>
- <tr><td>VST/AU Host</td><td>-</td><td>Ableton Live, Logic Pro, Cubase, FL Studio, etc.</td></tr>
- </table>
- <h3>Download Links</h3>
- <p>To download Alchemy for Mac or Windows, you need to visit one of the following links:</p>
- <table>
- <tr><th>Type</th><th>Name</th><th>Description</th></tr>
- <tr><td>Official</td><td>Camel Audio Website (No Longer Available)</td><td>This was the original website where you could buy and download Alchemy and its soundbanks. However, it was shut down in 2015 after Camel Audio was acquired by Apple.</td></tr>
- <tr><td>Official</td><td>Camel Audio Support Page (No Longer Available)</td><td>This was the official support page where you could download updates and manuals for Alchemy. However, it was also shut down in 2015 after Camel Audio was acquired by Apple.</td></tr>
- <tr><td>Alternative</td><td>KVR Audio Website (Available)</td><td>This is a website where you can find information about various audio plugins. It has a page dedicated to Alchemy where you can download the latest version (1.55) for Mac or Windows. You can also find user reviews, ratings, and comments about Alchemy.</td></tr>
- <tr><td>Alternative</td><td>Camel Audio Archive Website (Available)</td><td>This is a website where you can find archived versions of Camel Audio's products. It has a page dedicated to Alchemy where you can download older versions (1.0-1.50) for Mac or Windows. You can also find manuals, soundbanks, and tutorials for Alchemy.</td></tr>
- </table>
- <p>Note: To use any of these download links, you need to have a valid license key for Alchemy. If you don't have one, you won't be able to activate the plugin. You can try contacting Apple Support if you have purchased Alchemy before, or look for alternative ways to obtain a license key online.</p>
- <p>Camel audio alchemy synth hybrid plugin<br />
- Alchemy by camel audio virtual instrument<br />
- Camel audio alchemy sample manipulation synthesizer<br />
- How to import samples into camel audio alchemy<br />
- Camel audio alchemy soundbank player free download<br />
- Camel audio alchemy 5.5GB pack of audio samples<br />
- Camel audio alchemy additive spectral granular synthesis<br />
- Camel audio alchemy flexible rack of effects<br />
- Camel audio alchemy innovative modulation system<br />
- Camel audio alchemy powerful arpeggiator with groove import<br />
- Camel audio alchemy presets by top sound designers<br />
- Camel audio alchemy expansion sound banks<br />
- Camel audio alchemy compatible with SFZ WAV AIFF files<br />
- Camel audio alchemy analog modelled filters<br />
- Camel audio alchemy morph or crossfade between sources<br />
- Camel audio alchemy resynthesis and sound morphing abilities<br />
- Camel audio alchemy review and tutorial<br />
- Camel audio alchemy license and price<br />
- Camel audio alchemy vs omnisphere vs kontakt<br />
- Camel audio alchemy discontinued and alternatives<br />
- Where to buy camel audio alchemy online<br />
- How to install camel audio alchemy on mac or windows<br />
- How to use camel audio alchemy with logic pro x or ableton live<br />
- How to create your own presets with camel audio alchemy<br />
- How to update camel audio alchemy to version 1.55<br />
- Best tips and tricks for camel audio alchemy users<br />
- How to fix camel audio alchemy errors and crashes<br />
- How to uninstall camel audio alchemy completely<br />
- How to backup and restore camel audio alchemy data<br />
- How to get camel crusher and cameleon 5000 by camel audio</p>
- <h2>How to Use Alchemy?</h2>
- <p>Once you have downloaded and installed Alchemy on your computer, you are ready to start using it. Here are some basic steps on how to use its features:</p>
- <h3>Loading Presets</h3>
- <p>Alchemy comes with over 500 presets that cover various genres, styles, and sounds. You can load them by clicking on the preset browser button on the top left corner of the plugin window. You can then browse the presets by category, rating, name, author, etc. You can also use the search box to find a preset by keyword. To load a preset, simply double-click on it or drag and drop it onto the plugin window. You can also use the arrow keys to navigate through the presets and press enter to load them.</p>
- <h3>Importing Samples</h3>
- <p>If you want to use your own samples as sources for Alchemy, you can import them from SFZ, WAV or AIFF files. To do so, you need to click on the import button on the top right corner of the source editor window. You can then browse your computer for the file you want to import. You can also drag and drop the file onto the source editor window. Once you have imported a sample, you can edit its parameters such as start/end points, loop mode, pitch envelope, etc. You can also analyze the sample for additive or spectral synthesis modes.</p>
- <h3>Morphing and Crossfading</h3>
- <p>One of the most powerful features of Alchemy is its ability to morph and crossfade between sources. You can use the performance controls and remix pads to do this. The performance controls are located on the bottom left corner of the plugin window. They consist of eight knobs and eight sliders that can be assigned to any parameter of Alchemy. You can use them to tweak your sound in real time. The remix pads are located on the bottom right corner of the plugin window. They consist of eight pads that can be assigned to different snapshots of your sound. You can use them to morph and crossfade between sources by clicking and dragging on them. You can also automate them with MIDI or host automation.</p>
- <h3>Editing Parameters</h3>
- <p>If you want to access and adjust the parameters of each synthesis mode, filter, effect, modulator, and arpeggiator, you need to click on the corresponding button on the top of the plugin window. You will then see a detailed editor window where you can edit each parameter with knobs, sliders, envelopes, graphs, etc. You can also right-click on any parameter to assign it to a performance control or a modulator.</p>
- <h2>Why Choose Alchemy?</h2>
- <p>Alchemy is not just another synth plugin. It is a creative tool that can help you design any sound you can imagine. Here are some of the reasons why you should choose Alchemy for your sound design and music production needs:</p>
- <ul>
- <li><strong>Versatility:</strong> Alchemy can create any type of sound from acoustic to electronic, from realistic to surreal, from simple to complex. It can also blend different synthesis modes and sources to create hybrid sounds that are unique and original.</li>
- <li><strong>Quality:</strong> Alchemy has a high-quality sound engine that delivers crystal-clear and rich sounds. It also has a wide range of analog modeled filters and effects that add warmth and character to your sounds.</li>
- <li><strong>Usability:</strong> Alchemy is easy to use thanks to its intuitive interface and performance controls. It also has a comprehensive preset browser that lets you find the sound you need quickly and easily.</li>
- <li><strong>Inspiration:</strong> Alchemy comes with over 2GB of samples and 500 presets that cover various genres, styles, and sounds. You can also import your own samples and use them as sources for Alchemy. You can also use the morphing and crossfading features to create new sounds from existing ones.</li>
- <li><strong>Value:</strong> Alchemy is a great value for money as it offers a lot of features and sounds for a reasonable price. You can also expand your sound library with additional soundbanks that are available for purchase.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Alchemy is a synth plugin that you should definitely try if you are looking for a powerful and versatile synthesizer that can turn your musical dreams into reality. It offers a lot of features and sounds that will inspire you and enhance your sound design and music production skills. You can download Alchemy for Mac or Windows from one of the links provided in this article and start creating amazing sounds with it.</p>
- <h2>Frequently Asked Questions</h2>
- <ol>
- <li><strong>Is Alchemy still available?</strong></li>
- <p>Yes, Alchemy is still available for download from some alternative websites such as KVR Audio or Camel Audio Archive. However, it is no longer supported or updated by Camel Audio or Apple.</p>
- <li><strong>Can I use Alchemy with Logic Pro X?</strong></li>
- <p>Yes, you can use Alchemy with Logic Pro X as an Audio Unit plugin. However, you should note that Logic Pro X already comes with an updated version of Alchemy that has more features and sounds than the original one.</p>
- <li><strong>How do I activate Alchemy?</strong></li>
- <p>To activate Alchemy, you need to have a valid license key that you received when you purchased Alchemy from Camel Audio or Apple. You need to enter this license key when you launch Alchemy for the first time.</p>
- <li><strong>How do I update Alchemy?</strong></li>
- <p>To update Alchemy, you need to download the latest version (1.55) from one of the alternative websites such as KVR Audio or Camel Audio Archive. You then need to install it over your existing version of Alchemy.</p>
- <li><strong>How do I get more sounds for Alchemy?</strong></li>
- <p>To get more sounds for Alchemy, you can purchase additional soundbanks from Camel Audio's website (no longer available) or from other third-party sound designers such as Biome Digital or Sample Magic. You can also create your own sounds by importing your own samples or using the synthesis modes of Alchemy.</p>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackeddll 3.1.5.0.exe Download A Simple and Effective Way to Bypass Software Protection.md DELETED
@@ -1,134 +0,0 @@
- <br />
- <h1>What is eddll 3.1.5.0.exe and why do you need it?</h1>
- <p>Have you ever wondered how to keep your Dell system software up to date and secure? If so, you might have heard of eddll 3.1.5.0.exe, a stand-alone application that provides updates for system software that is released by Dell.</p>
- <p>Eddll 3.1.5.0.exe is also known as Dell Command Update, a tool that simplifies the BIOS, firmware, driver, and application update experience for Dell client hardware. It helps you to find and install the latest updates for your system software in a few clicks.</p>
- <h2>crackeddll 3.1.5.0.exe download</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://byltly.com/2uKzSv">https://byltly.com/2uKzSv</a></b></p><br /><br />
- <p>But how do you download eddll 3.1.5.0.exe safely and securely? How do you use it to update your Dell system software? What are the benefits and risks of using it? And how do you troubleshoot common problems with it?</p>
- <p>In this article, we will answer all these questions and more, so that you can make the most of eddll 3.1.5.0.exe and keep your Dell system software current and compatible.</p>
- <h2>How to download eddll 3.1.5.0.exe safely and securely?</h2>
- <p>Before you can use eddll 3.1.5.0.exe to update your Dell system software, you need to download it from a reliable source.</p>
- <p>However, downloading any file from the internet can be risky, especially if you are not careful about where you get it from.</p>
- <p>crackeddll 3.1.5.0 free download<br />
- crackeddll 3.1.5.0 software<br />
- crackeddll 3.1.5.0 zip<br />
- crackeddll 3.1.5.0 rar<br />
- crackeddll 3.1.5.0 torrent<br />
- crackeddll 3.1.5.0 full version<br />
- crackeddll 3.1.5.0 license key<br />
- crackeddll 3.1.5.0 activation code<br />
- crackeddll 3.1.5.0 serial number<br />
- crackeddll 3.1.5.0 crack<br />
- crackeddll 3.1.5.0 patch<br />
- crackeddll 3.1.5.0 keygen<br />
- crackeddll 3.1.5.0 hack<br />
- crackeddll 3.1.5.0 mod<br />
- crackeddll 3.1.5.0 fix<br />
- crackeddll 3.1.5.0 repair<br />
- crackeddll 3.1.5.0 update<br />
- crackeddll 3.1.5.0 latest version<br />
- crackeddll 3.1.5.exe download for windows<br />
- crackeddll 3.exe download for mac<br />
- cracked dll files fixer download<br />
- cracked dll files for games download<br />
- how to crack software by modifying dll files<br />
- how to use cracked dll 3.exe<br />
- how to install cracked dll 3.exe<br />
- how to uninstall cracked dll 3.exe<br />
- how to remove cracked dll virus<br />
- how to fix dll errors with cracked dll software<br />
- how to download dll files for free with cracked dll software<br />
- how to backup and restore dll files with cracked dll software<br />
- how to optimize pc performance with cracked dll software<br />
- how to prevent blue screen of death with cracked dll software<br />
- how to solve exe virus infection with cracked dll software<br />
- how to enhance pc security with cracked dll software<br />
- benefits of using cracked dll software<br />
- features of cracked dll software<br />
- reviews of cracked dll software<br />
- alternatives to cracked dll software<br />
- comparison of cracked dll software and other dll fixers<br />
- pros and cons of using cracked dll software<br />
- risks of using cracked dll software<br />
- legality of using cracked dll software<br />
- safety of using cracked dll software<br />
- reliability of using cracked dll software<br />
- compatibility of using cracked dll software with different windows versions<br />
- troubleshooting tips for using cracked dll software<br />
- customer support for using cracked dll software<br />
- tutorials for using cracked dll software<br />
- testimonials for using cracked dll software<br />
- discounts and offers for using cracked dll software</p>
- <p>Here are the steps to download eddll 3.1.5.0.exe safely and securely:</p>
- <ol>
- <li>Check your system compatibility and requirements. Eddll 3.1.5.0.exe is designed to run on Microsoft Windows 64bit Operating Systems. You can check your system information by right-clicking on the Start menu and selecting System.</li>
- <li>Find a reliable source for downloading eddll 3.1.5.0.exe. The best source for downloading eddll 3.1.5.0.exe is the official Dell website, where you can find the latest driver information for your system. You can also use other trusted websites that offer eddll 3.1.5.0.exe for download, but make sure to read the reviews and ratings before downloading.</li>
- <li>Scan the file for viruses and malware before installing. Even if you download eddll 3.1.5.0.exe from a reputable source, you should always scan it for viruses and malware before installing it on your system. You can use your antivirus software or an online scanner to do this.</li>
- </ol>
- <p>By following these steps, you can download eddll 3.1.5.0.exe safely and securely, and avoid any unwanted surprises.</p>
- <h2>How to use eddll 3.1.5.0.exe to update your Dell system software?</h2>
- <p>Once you have downloaded eddll 3.1.5.0.exe, you can use it to update your Dell system software in a few easy steps:</p>
- <ol>
- <li>Launch eddll 3.1.5.0.exe and accept the terms and conditions. Double-click on the file and follow the instructions on the screen to start the installation process. You will need to accept the terms and conditions of the Dell Software License Agreement before proceeding.</li>
- <li>Select the updates that you want to install. Eddll 3.1.5.0.exe will scan your system and show you a list of available updates for your system software. You can choose to install all the updates or select the ones that you want to install.</li>
- <li>Wait for the update process to complete and restart your system. Eddll 3.1.5.0.exe will download and install the updates for your system software. Depending on the size and number of updates, this may take some time. You will be notified when the update process is complete and you will need to restart your system for the changes to take effect.</li>
- </ol>
- <p>By following these steps, you can use eddll 3.1.5.0.exe to update your Dell system software and keep it current and compatible.</p>
- <h2>What are the benefits of using eddll 3.1.5.0.exe?</h2>
- <p>Using eddll 3.1.5.0.exe to update your Dell system software has many benefits, such as:</p>
- <ul>
- <li>Simplify the BIOS, firmware, driver, and application update experience for Dell client hardware. Eddll 3.1.5.0.exe is a stand-alone application that does not require any other software or tools to run. It automatically detects your system model and configuration and shows you the relevant updates for your system software.</li>
- <li>Enable security enhancement with Dell signature verification for all packages. Eddll 3.1.5.0.exe verifies the signature of all packages before installing them on your system, ensuring that they are authentic and safe.</li>
- <li>Give you a quiet period on first start. Eddll 3.1.5.0.exe gives you a one hour quiet period where no updates happen automatically when you start your new system for the first time. This feature helps to enhance the Out of Box Experience (OOBE) and lets you enjoy your new system without interruptions.</li>
- </ul>
- <p>By using eddll 3.1.5.0.exe, you can enjoy these benefits and more, and keep your Dell system software up to date and secure.</p>
- <h2>What are the potential risks of using eddll 3.1.5.0.exe?</h2>
- <p>While using eddll 3.1.5.0.exe has many benefits, it also has some potential risks that you should be aware of, such as:</p>
- <ul>
- <li>Download a corrupted or infected file from an untrusted source. If you download eddll 3.1.5.0.exe from an untrusted source, you may end up with a corrupted or infected file that can harm your system or compromise your data. Therefore, you should always download eddll 3.1.5.0.exe from a reliable source and scan it for viruses and malware before installing.</li>
- <li>Encounter compatibility issues or errors during the update process. Sometimes, the updates for your system software may not be compatible with your system model or configuration, or may cause errors during the installation process. This can result in system instability or performance issues. Therefore, you should always check your system compatibility and requirements before downloading and installing eddll 3.1.5.0.exe.</li>
- <li>Damage your system software or hardware if the update fails or is interrupted. Therefore, you should back up your data and ensure a stable power and network connection before using eddll 3.1.5.0.exe.</li>
- </ul>
- <p>By being aware of these risks and taking precautions, you can minimize the chances of encountering any problems with eddll 3.1.5.0.exe and use it safely and securely.</p>
- <h2>How to troubleshoot common problems with eddll 3.1.5.0.exe?</h2>
- <p>Even if you follow the steps and precautions mentioned above, you may still encounter some problems with eddll 3.1.5.0.exe, such as:</p>
- <ul>
- <li>Eddll 3.1.5.0.exe does not run or shows an error message.</li>
- <li>Eddll 3.1.5.0.exe does not find any updates or shows incorrect updates.</li>
- <li>Eddll 3.1.5.0.exe takes too long to download or install the updates.</li>
- </ul>
- <p>If you face any of these problems, you can try the following solutions to troubleshoot them:</p>
- <h3>Solution 1: Check your system compatibility and requirements again</h3>
- <p>Make sure that your system meets the minimum requirements for running eddll 3.1.5.0.exe, such as:</p>
- <ul>
- <li>Operating System: Microsoft Windows 64bit</li>
- <li>System Model: Dell client hardware</li>
- <li>System Configuration: compatible with the updates</li>
- </ul>
- <p>If your system does not meet these requirements, you may need to upgrade your system or use a different tool to update your system software.</p>
- <h3>Solution 2: Download eddll 3.1.5.0.exe again from a different source</h3>
- <p>It is possible that the file that you downloaded is corrupted or incomplete, which can cause eddll 3.1.5.0.exe to not run or show an error message. To fix this, you can try to download eddll 3.1.5.0.exe again from a different source, such as the official Dell website or another trusted website. Make sure to scan the file for viruses and malware before installing it.</p>
- <h3>Solution 3: Contact Dell support for assistance</h3>
- <p>If none of the above solutions work, you may need to contact Dell support for assistance. They can help you to diagnose and resolve any issues with eddll 3.1.5.0.exe and your system software. You can contact Dell support by phone, email, chat, or online forums.</p>
- <h2>Conclusion</h2>
- <p>Eddll 3.1.5.0.exe is a stand-alone application that provides updates for system software that is released by Dell. It simplifies the BIOS, firmware, driver, and application update experience for Dell client hardware and enables security enhancement with Dell signature verification for all packages.</p>
- <p>However, using eddll 3.1.5.0.exe also has some potential risks, such as downloading a corrupted or infected file from an untrusted source, encountering compatibility issues or errors during the update process, or damaging your system software or hardware if the update fails or is interrupted.</p>
- <p>Therefore, you need to follow some steps and precautions to download and use eddll 3.1.5.0.exe safely and securely, such as checking your system compatibility and requirements, finding a reliable source for downloading eddll 3.1.5.0.exe, scanning the file for viruses and malware before installing, backing up your data and ensuring a stable power and network connection before using eddll 3.1.5.0.exe.</p>
- <p>If you encounter any problems with eddll 3.1.5.0.exe, you can try some solutions to troubleshoot them, such as checking your system compatibility and requirements again, downloading eddll 3.1.5.0.exe again from a different source, or contacting Dell support for assistance.</p>
- <p>We hope that this article has helped you to understand what eddll 3.1.5.0.exe is and how to use it to update your Dell system software. If you have any questions or feedback, please feel free to leave a comment below.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about eddll 3.1.5.0.exe:</p>
- <ol>
- <li>What is the difference between eddll 3.1.5.0.exe and Dell Update?</li>
- <p>Eddll 3.1.5.0.exe is also known as Dell Command Update, a stand-alone application that provides updates for system software that is released by Dell. Dell Update is another application that provides updates for Dell consumer systems, such as Inspiron, XPS, Alienware, and Vostro.</p>
- <li>How do I know if I need to update my system software?</li>
- <p>You can use eddll 3.1.5.0.exe to scan your system and show you a list of available updates for your system software. You can also check the Dell website for the latest driver information for your system model and configuration.</p>
- <li>How often should I use eddll 3.1.5.0.exe to update my system software?</li>
- <p>You should use eddll 3.1.5.0.exe to update your system software whenever there is a new update available or whenever you encounter a problem with your system software. You can also set eddll 3.1.5.0.exe to run automatically or manually according to your preference.</p>
- <li>Can I use eddll 3.1.5.0.exe to update other system software besides Dell?</li>
- <p>No, eddll 3.1.5.0.exe only provides updates for system software that is released by Dell. If you want to update other system software, such as Windows, Office, or antivirus, you need to use other tools or applications.</p>
- <li>Can I uninstall eddll 3.1.5.0.exe if I don't need it anymore?</li>
- <p>Yes, you can uninstall eddll 3.1.5.0.exe if you don't need it anymore or if you want to use a different tool to update your system software. You can uninstall eddll 3.1.5.0.exe from the Control Panel or the Settings app.</p>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fences 3 Serial Key.md DELETED
@@ -1,143 +0,0 @@
-
- <h1>Fences 3 Serial Key: How to Download and Activate Fences 3 Software</h1>
- <p>If you are looking for a way to organize your desktop icons and windows in a neat and stylish manner, you might have heard of <strong>Fences 3 software</strong>. This is a popular desktop enhancement tool that allows you to create shaded areas on your desktop that you can place your icons into. You can also customize the appearance and behavior of your fences, create rules for automatic icon sorting, use snapshots to save and restore your desktop layout, and more.</p>
- <h2>Fences 3 serial key</h2><br /><p><b><b>Download File</b> &#187; <a href="https://byltly.com/2uKwVY">https://byltly.com/2uKwVY</a></b></p><br /><br />
- <p>However, before you can enjoy all these features and benefits of Fences 3 software, you need to have a <strong>serial key</strong> that allows you to activate the software on your computer. A serial key is a unique code that verifies that you have purchased a legitimate copy of the software from Stardock or an authorized reseller. Without a serial key, you cannot use Fences 3 software beyond the trial period.</p>
- <p>In this article, we will show you how to get a serial key for Fences 3 software, how to download and activate it with your serial key, how to use it to organize your desktop, and how to troubleshoot some common issues that you might encounter. We will also provide some tips and tricks for using Fences 3 software effectively, and answer some frequently asked questions that you might have. By the end of this article, you will be able to enjoy Fences 3 software to the fullest and make your desktop look amazing.</p>
- <h2>What is Fences 3 Software?</h2>
- <p>Fences 3 software is a desktop enhancement tool that helps you organize your desktop icons and windows into shaded areas called fences. You can create as many fences as you want, and place them anywhere on your desktop. You can also resize, move, hide, or roll up your fences as you wish. Fences 3 software also lets you customize the appearance and behavior of your fences, such as color, transparency, title, layout, sorting, and more.</p>
- <p>One of the most powerful features of Fences 3 software is the ability to create rules for automatic icon sorting. You can specify which icons go into which fence based on criteria such as name, type, date, size, or label. For example, you can create a rule that automatically places all your documents into a fence called Documents, or all your games into a fence called Games. This way, you don't have to manually drag and drop your icons every time you add or remove them from your desktop.</p>
- <p>Another useful feature of Fences 3 software is the ability to use snapshots to save and restore your desktop layout. You can take a snapshot of your current desktop arrangement and name it as you like. You can then switch between different snapshots with a simple double-click or a hotkey. This is especially handy if you have different desktop layouts for different tasks or scenarios, such as work, gaming, or entertainment.</p>
- <p>Fences 3 software is compatible with Windows 10, 8.1, 8, and 7. It requires at least 1 GB of RAM and 150 MB of hard disk space. It also supports high DPI monitors and multiple monitors.</p>
- <h2>Why Do You Need a Serial Key to Use Fences 3 Software?</h2>
- <p>A serial key is a unique code that verifies that you have purchased a legitimate copy of Fences 3 software from Stardock or an authorized reseller. A serial key is usually composed of letters and numbers, such as XXXX-XXXX-XXXX-XXXX. You need a serial key to activate Fences 3 software on your computer and unlock all its features and benefits.</p>
- <p>Without a serial key, you can only use Fences 3 software as a trial version for 30 days. After the trial period expires, you will not be able to create new fences or modify existing ones. You will also see a watermark on your desktop that reminds you to purchase a serial key.</p>
- <p>A serial key is valid for one computer only. If you want to use Fences 3 software on another computer, you need to purchase another serial key or deactivate the software on the first computer and reactivate it on the second one.</p>
- <h2>How to Get a Serial Key for Fences 3 Software?</h2>
- <h3>Purchase Fences 3 Software from Stardock or Authorized Resellers</h3>
- <p>The easiest way to get a serial key for Fences 3 software is to purchase it from Stardock or authorized resellers. Stardock is the developer and publisher of Fences 3 software, and authorized resellers are online stores that have permission to sell Stardock products.</p>
- <p>You can purchase Fences 3 software from Stardock website for $9.99 USD. You can pay with credit card, PayPal, Amazon Pay, or Bitcoin. After completing the payment process, you will receive an email with your serial key and download link.</p>
- <p>You can also purchase Fences 3 software from authorized resellers such as Steam, Humble Bundle, or Fanatical. The price may vary depending on the reseller and the region. After purchasing Fences 3 software from an authorized reseller, you will receive an email with your serial key and download link.</p>
- <h3>Retrieve Your Serial Key from Stardock Support</h3>
- <p>If you have already purchased Fences 3 software from Stardock or an authorized reseller but have lost or forgotten your serial key, you can retrieve it from Stardock support. You will need to provide some information to verify your purchase, such as your email address, order number, or receipt.</p>
- <p>To retrieve your serial key from Stardock support, follow these steps:</p>
- <ol>
- <li>Go to <a href="">Stardock website</a> and click on Support</li>
- <li>Select Fences 3 from the Product dropdown menu</li>
- <li>Click on Retrieve Serial Number</li>
- <li>Enter your email address, order number, or receipt and click on Submit</li>
- <li>You will receive an email with your serial key and download link</li>
- </ol>
- <p>If you have any issues with retrieving your serial key from Stardock support, you can contact them via email at [email protected] or via phone at 1-800-493-9662.</p>
- <h2>How to Download Fences 3 Software?</h2>
- <p>After you have purchased Fences 3 software and received your serial key, you can download it from Stardock website or other sources. The download size is about 12 MB and the installation process is simple and fast.</p>
- <p>To download Fences 3 software from Stardock website, follow these steps:</p>
- <ol>
- <li>Go to <a href="">Stardock website</a> and click on Downloads</li>
- <li>Select Fences 3 from the Product dropdown menu</li>
- <li>Click on Download Now</li>
- <li>Save the file to your computer and run it</li>
- <li>Follow the instructions on the screen to install Fences 3 software</li>
- </ol>
- <p>You can also download Fences 3 software from other sources, such as Steam, Humble Bundle, or Fanatical. However, you need to make sure that the source is trustworthy and that the file is not corrupted or infected with malware. You also need to enter your serial key during the installation process to activate Fences 3 software.</p>
- <h2>How to Activate Fences 3 Software with Your Serial Key?</h2>
- <h3>Online Activation</h3>
- <p>The easiest way to activate Fences 3 software with your serial key is to use the online activation method. This method requires an internet connection and an email address. You can activate Fences 3 software online with your serial key and email address during the installation process or after the installation process.</p>
- <p>To activate Fences 3 software online with your serial key and email address during the installation process, follow these steps:</p>
- <ol>
- <li>Run the installer file that you downloaded from Stardock website or other sources</li>
- <li>Follow the instructions on the screen until you reach the Activation screen</li>
- <li>Enter your serial key and email address in the fields provided</li>
- <li>Click on Activate Online</li>
- <li>You will see a message that says "Activation Successful"</li>
- <li>Click on Finish to complete the installation process</li>
- </ol>
- <p>To activate Fences 3 software online with your serial key and email address after the installation process, follow these steps:</p>
- <ol>
- <li>Launch Fences 3 software from your desktop or start menu</li>
- <li>You will see a message that says "Your trial period has expired. Please enter your product key to continue using this product."</li>
- <li>Click on Enter Product Key</li>
- <li>Enter your serial key and email address in the fields provided</li>
- <li>Click on Activate Online</li>
- <li>You will see a message that says "Activation Successful"</li>
- <li>Click on OK to continue using Fences 3 software</li>
- </ol>
- <h3>Offline Activation</h3>
- <p>If you do not have an internet connection or an email address, you can use the offline activation method. This method requires a .REG file that contains your activation information. You can create a .REG file from another computer that has an internet connection and transfer it to your computer via a USB drive or other means.</p>
- <p>To activate Fences 3 software offline with your serial key and a .REG file, follow these steps:</p>
- <ol>
- <li>Go to another computer that has an internet connection and open a web browser</li>
- <li>Go to <a href="">Stardock website</a> and click on Support</li>
- <li>Select Fences 3 from the Product dropdown menu</li>
- <li>Click on Offline Activation</li>
- <li>Enter your serial key and click on Generate</li>
- <li>You will see a .REG file that contains your activation information</li>
- <li>Save the .REG file to a USB drive or other means and transfer it to your computer</li>
- <li>Run the installer file that you downloaded from Stardock website or other sources</li>
- <li>Follow the instructions on the screen until you reach the Activation screen</li>
- <li>Click on Activate Offline</li>
- <li>Browse to the location of the .REG file that you transferred to your computer and select it</li>
- <li>You will see a message that says "Activation Successful"</li>
- <li>Click on Finish to complete the installation process</li>
- </ol>
- <h2>How to Use Fences 3 Software?</h2>
- <p>After you have activated Fences 3 software with your serial key, you can start using it to organize your desktop icons and windows. Here are some basic steps to use Fences 3 software:</p>
- <ol>
- <li>Launch Fences 3 software from your desktop or start menu</li>
- <li>You will see a welcome screen that gives you some tips and options for using Fences 3 software</li>
- <li>You can choose to create your own fences or use the default fences that Fences 3 software provides, such as Programs, Folders, Documents, etc.</li>
- <li>To create your own fence, right-click on an empty area of your desktop and select Create New Fence Here</li>
- <li>A shaded area will appear on your desktop with a title bar that says New Fence. You can rename it by double-clicking on the title bar and typing a new name</li>
- <li>You can drag and drop icons from your desktop into the fence. You can also right-click on an icon and select Send To Fence to move it to a specific fence</li>
- <li>You can resize, move, hide, or roll up your fence by using the mouse or keyboard shortcuts. You can also right-click on the fence and select Fence Options to customize its appearance and behavior</li>
- <li>To create rules for automatic icon sorting, right-click on an empty area of your desktop and select Configure Fences. Then click on Sorting & Organizing tab and select Create Rule. You can specify which icons go into which fence based on criteria such as name, type, date, size, or label. You can also edit or delete existing rules from this tab</li>
- <li>To use snapshots to save and restore your desktop layout, right-click on an empty area of your desktop and select Configure Fences. Then click on Layouts & Snapping tab and select Take Snapshot. You can name your snapshot as you like and switch between different snapshots with a simple double-click or a hotkey. You can also edit or delete existing snapshots from this tab</li>
- </ol>
- <h2>Tips and Tricks for Using Fences 3 Software Effectively</h2>
- <p>Fences 3 software is a powerful and versatile tool that can help you organize your desktop in many ways. Here are some tips and tricks for using Fences 3 software effectively:</p>
- <ul>
- <li>You can use keyboard shortcuts to quickly access or modify your fences. For example, you can press Ctrl + Alt + Shift + B to show or hide all fences, Ctrl + Alt + Shift + R to roll up or down all fences, Ctrl + Alt + Shift + S to switch between different snapshots, etc. You can also customize your own keyboard shortcuts from the Configure Fences menu.</li>
- <li>You can use mouse gestures to quickly access or modify your fences. For example, you can double-click on an empty area of your desktop to show or hide all fences, drag an icon over a fence title bar to move it into that fence, drag a fence title bar over another fence title bar to swap their positions, etc.</li>
- <li>You can use quick-hide feature to temporarily hide all fences and icons on your desktop. To do this, simply move your mouse cursor to the edge of your screen where you have enabled quick-hide from the Configure Fences menu. To show them again, just move your mouse cursor away from the edge of your screen.</li>
- <li>You can use desktop pages feature to create multiple virtual desktops that you can switch between with a mouse wheel or a hotkey. To do this, right-click on an empty area of your desktop and select Configure Fences. Then click on Desktop Pages tab and enable the feature. You can also customize the number and layout of your desktop pages from this tab.</li>
- <li>You can use folder portals feature to create a fence that shows the contents of another folder on your computer. To do this, right-click on an empty area of your desktop and select Create New Fence Here. Then right-click on the fence and select Fence Options. Then click on Portal and select Browse to choose a folder that you want to display in the fence. You can also customize the appearance and behavior of the portal fence from this menu.</li>
- </ul>
- <h2>Troubleshooting Common Issues with Fences 3 Software</h2>
- <p>Fences 3 software is a reliable and stable tool that works well with most Windows systems. However, you might encounter some issues with Fences 3 software from time to time, such as activation errors, compatibility issues, performance issues, etc. Here are some solutions for troubleshooting common issues with Fences 3 software:</p>
- <ul>
- <li>If you have trouble activating Fences 3 software with your serial key, make sure that you have entered the correct serial key and email address. Also, make sure that you have an internet connection if you are using the online activation method. If you are using the offline activation method, make sure that you have transferred the .REG file correctly and selected it during the activation process.</li>
- <li>If you have trouble downloading or installing Fences 3 software, make sure that you have enough disk space and memory on your computer. Also, make sure that you have downloaded the file from a trustworthy source and that it is not corrupted or infected with malware. If you have downloaded the file from Stardock website or an authorized reseller, you can verify the file integrity by checking its MD5 checksum (a short sketch of how to do this follows this list).</li>
- <li>If you have trouble using Fences 3 software, make sure that it is compatible with your Windows version and system settings. Also, make sure that it is not conflicting with other software or hardware on your computer. You can try to update Fences 3 software to the latest version, disable or uninstall any conflicting software or hardware, or run Fences 3 software in compatibility mode or as an administrator.</li>
- <li>If you have any other issues with Fences 3 software, you can contact Stardock support via email at [email protected] or via phone at 1-800-493-9662. You can also visit Stardock website and check their knowledge base, forums, or FAQs for more information and solutions.</li>
- </ul>
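As an aside (not part of the original article): the MD5 check mentioned above needs no extra tools. The following is a minimal Python sketch; the installer filename and the expected checksum are placeholders rather than real Stardock values.

# Minimal MD5 verification sketch -- filename and expected hash are placeholders.
import hashlib

def md5_of(path, chunk_size=1 << 20):
    # Hash the file in chunks so large downloads don't need to fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

expected = '0123456789abcdef0123456789abcdef'  # the publisher's published checksum
if md5_of('Fences3_setup.exe') != expected:
    print('Checksum mismatch: the download may be corrupted or tampered with.')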
113
- <h2>Conclusion</h2>
114
- <p>Fences 3 software is a great tool that can help you organize your desktop icons and windows in a neat and stylish manner. It allows you to create shaded areas on your desktop that you can place your icons into, customize their appearance and behavior, create rules for automatic icon sorting, use snapshots to save and restore your desktop layout, and more.</p>
115
- <p>To use Fences 3 software, you need to have a serial key that verifies that you have purchased a legitimate copy of the software from Stardock or an authorized reseller. You can purchase Fences 3 software from Stardock website or authorized resellers for $9.99 USD. You can also retrieve your serial key from Stardock support if you have lost or forgotten it.</p>
116
- <p>After purchasing Fences 3 software and receiving your serial key, you can download it from Stardock website or other sources. You can then activate it online or offline with your serial key and email address. You can then start using it to organize your desktop icons and windows.</p>
117
- <p>We hope that this article has helped you understand how to download and activate Fences 3 software with your serial key, how to use it to organize your desktop, and how to troubleshoot some common issues that you might encounter. We also hope that you have learned some tips and tricks for using Fences 3 software effectively.</p>
118
- <p>If you have any questions or feedback about Fences 3 software or this article, please feel free to leave a comment below or contact us via email or phone. We would love to hear from you and help you out.</p>
119
- <p>Thank you for reading this article and happy fencing!</p>
120
- <h2>FAQs</ <h2>FAQs</h2>
121
- <p>Here are some frequently asked questions about Fences 3 software that you might find helpful:</p>
122
- <ol>
123
- <li>What is the difference between Fences 3 and Fences 2?</li>
124
- <p>Fences 3 is the latest version of Fences software that has some new and improved features and benefits compared to Fences 2. Some of the main differences are:</p>
125
- <ul>
126
- <li>Fences 3 supports Windows 10, 8.1, 8, and 7, while Fences 2 only supports Windows 8 and 7</li>
127
- <li>Fences 3 supports high DPI monitors and multiple monitors, while Fences 2 does not</li>
128
- <li>Fences 3 has a new user interface and design that is more modern and intuitive, while Fences 2 has an older and simpler user interface and design</li>
129
- <li>Fences 3 has more options and customization for fences, such as color, transparency, title, layout, sorting, etc., while Fences 2 has fewer options and customization for fences</li>
130
- <li>Fences 3 has more features and functionality for desktop organization, such as rules, snapshots, desktop pages, portals, etc., while Fences 2 has fewer features and functionality for desktop organization</li>
131
- </ul>
132
- <p>If you have Fences 2 software and want to upgrade to Fences 3 software, you can do so from Stardock website for $4.99 USD.</p>
133
- <li>How many computers can I use Fences 3 software on with one serial key?</li>
134
- <p>You can use Fences 3 software on one computer only with one serial key. If you want to use Fences 3 software on another computer, you need to purchase another serial key or deactivate the software on the first computer and reactivate it on the second one.</p>
135
- <li>How can I backup or restore my fences settings?</li>
136
- <p>You can backup or restore your fences settings by using the export or import feature from the Configure Fences menu. To do this, right-click on an empty area of your desktop and select Configure Fences. Then click on Backup & Restore tab and select Export or Import. You can choose to export or import all your fences settings or specific ones. You can also choose the location where you want to save or load your fences settings.</p>
137
- <li>How can I uninstall Fences 3 software?</li>
138
- <p>You can uninstall Fences 3 software by using the uninstaller file that comes with the software or by using the Windows Control Panel. To use the uninstaller file, go to the folder where you installed Fences 3 software and run the file called Uninstall.exe. To use the Windows Control Panel, go to Start > Settings > Apps > Apps & Features and find Fences 3 software from the list. Then click on Uninstall and follow the instructions on the screen.</p>
139
- <li>How can I get help or support for Fences 3 software?</li>
140
- <p>You can get help or support for Fences 3 software by contacting Stardock support via email at [email protected] or via phone at 1-800-493-9662. You can also visit Stardock website and check their knowledge base, forums, or FAQs for more information and solutions.</p>
141
- </ol>
 
spaces/1gistliPinn/ChatGPT4/Examples/Dr.Fone 9.6.2 Crack With Registration Codes Full Free Download VERIFIED.md DELETED
@@ -1,39 +0,0 @@
1
-
2
- <h1>Dr.Fone 9.6.2 Crack With Registration Codes Full Free Download</h1>
3
- <p>Dr.Fone is a powerful data recovery software that can help you recover lost or deleted files from your Android or iOS devices. Whether you accidentally deleted photos, videos, contacts, messages, notes, or other important data, Dr.Fone can scan your device and restore them in minutes. Dr.Fone also supports backup and restore, data transfer, screen unlock, system repair, and other useful features.</p>
4
- <h2>Dr.Fone 9.6.2 Crack With Registration Codes Full Free Download</h2><br /><p><b><b>Download Zip</b> &#127383; <a href="https://imgfil.com/2uxYAf">https://imgfil.com/2uxYAf</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download and install Dr.Fone 9.6.2 crack with registration codes full free. This is the latest version of Dr.Fone that has been tested and verified to work on Windows and Mac OS. With Dr.Fone 9.6.2 crack, you can enjoy all the premium features of Dr.Fone without paying anything.</p>
6
- <h2>How to Download Dr.Fone 9.6.2 Crack With Registration Codes Full Free</h2>
7
- <p>To download Dr.Fone 9.6.2 crack with registration codes full free, you need to follow these steps:</p>
8
- <ol>
9
- <li>Click on the link below to download the Dr.Fone 9.6.2 crack file.</li>
10
- <li>Extract the file using WinRAR or any other extraction tool.</li>
11
- <li>Run the setup file and follow the instructions to install Dr.Fone on your computer.</li>
12
- <li>Copy the crack file and paste it into the installation folder of Dr.Fone.</li>
13
- <li>Launch Dr.Fone and enter one of the registration codes below to activate it.</li>
14
- </ol>
15
- <p>Here are some registration codes that you can use:</p>
16
- <ul>
17
- <li>DRFONE-1234-5678-9012-3456</li>
18
- <li>DRFONE-7890-1234-5678-9012</li>
19
- <li>DRFONE-3456-7890-1234-5678</li>
20
- </ul>
21
- <h2>Why Choose Dr.Fone 9.6.2 Crack With Registration Codes Full Free</h2>
22
- <p>Dr.Fone 9.6.2 crack with registration codes full free is a great choice for anyone who wants to recover their lost or deleted data from their devices. Here are some of the benefits of using Dr.Fone 9.6.2 crack:</p>
24
- <ul>
25
- <li>It supports over 6000 Android and iOS devices, including Samsung, Huawei, LG, iPhone, iPad, iPod, etc.</li>
26
- <li>It can recover various types of data, such as photos, videos, music, contacts, messages, WhatsApp, documents, etc.</li>
27
- <li>It can recover data from different scenarios, such as accidental deletion, factory reset, system crash, virus attack, water damage, etc.</li>
28
- <li>It can backup and restore your data to your computer or another device with one click.</li>
29
- <li>It can transfer data between different devices or platforms without any hassle.</li>
30
- <li>It can unlock your screen if you forgot your password or pattern.</li>
31
- <li>It can fix various system issues on your device, such as stuck on logo, black screen, boot loop, etc.</li>
32
- <li>It has a user-friendly interface that is easy to use for anyone.</li>
33
- <li>It has a high success rate and fast speed for data recovery.</li>
34
- <li>It is safe and secure to use without any virus or malware.</li>
35
- </ul>
36
- <h2>Conclusion</h2>
37
- <p>If you are looking for reliable and effective data recovery software for your Android or iOS devices, you should try Dr.Fone 9.6.2 crack with registration codes full free. It can help you recover your lost or deleted data in minutes and also provides other useful features to manage your device. Download Dr.Fone 9.6.2 crack with registration codes full free today and enjoy its benefits!</p>
 
spaces/1phancelerku/anime-remove-background/Bike 3D Game Race Stunt and Customize Your Motorbike.md DELETED
@@ -1,151 +0,0 @@
1
-
2
- <table>
3
- <tr>
4
- <td>
5
- <h1>Bike 3D Game: A Guide for Beginners</h1>
6
- <p>Do you love bikes and want to experience the thrill of riding them in a virtual world? If so, you should try playing a bike 3D game. A bike 3D game is a type of video game that simulates bike riding in a three-dimensional environment. You can choose from different types of bikes, such as racing bikes, stunt bikes, or police bikes, and explore various maps and locations, such as cities, deserts, or mountains. You can also perform amazing tricks and stunts, compete with other players online, or complete challenging missions and achievements.</p>
7
- <h2>bike 3d game</h2><br /><p><b><b>Download</b> &#10037; <a href="https://jinyurl.com/2uNML4">https://jinyurl.com/2uNML4</a></b></p><br /><br />
8
- <p>A bike 3D game is not only fun and exciting, but also beneficial for your brain and skills. Playing a bike 3D game can improve your hand-eye coordination, reaction time, spatial awareness, problem-solving, creativity, and concentration. It can also reduce stress, enhance mood, boost confidence, and provide a sense of accomplishment.</p>
9
- <p>In this article, we will guide you through everything you need to know about bike 3D games. We will cover the types of bike 3D games, their features, how to play them, and some of the best bike 3D games to try. By the end of this article, you will be ready to hop on your virtual bike and have a blast!</p>
10
- <h2>Types of Bike 3D Games</h2>
11
- <h3>Racing Bike Games</h3>
12
- <p>If you are into speed and adrenaline, racing bike games are for you. These games let you race with other bikers on various tracks and courses. You can choose from different modes, such as time trial, lap race, elimination race, or tournament. You can also customize your bike's appearance and performance to suit your preferences. Some examples of racing bike games are MotoGP Racing Championship Quest, SBK Official Mobile Game, or Traffic Rider.</p>
62
- <h3>Stunt Bike Games</h3>
63
- <p>If you are into creativity and excitement, stunt bike games are for you. These games let you perform incredible tricks and stunts with your bike on various ramps and obstacles. You can choose from different modes, such as freestyle, career, or challenge. You can also customize your bike's appearance and performance to suit your style. Some examples of stunt bike games are Bike Stunt 3D, Mad Skills BMX 2, or Bike Race Free.</p>
64
- <h3>Police Bike Games</h3>
65
- <p>If you are into action and adventure, police bike games are for you. These games let you play as a police officer on a bike and chase down criminals and lawbreakers. You can choose from different modes, such as patrol, pursuit, or arrest. You can also customize your bike's appearance and performance to suit your mission. Some examples of police bike games are Police Motorbike Simulator 3D, Police Bike City Simulator, or Police Bike Racing Free.</p>
66
- <h2>Features of Bike 3D Games</h2>
67
- <h3>Realistic Graphics and Physics</h3>
68
- <p>One of the main features of bike 3D games is their realistic graphics and physics. These games use advanced 3D technology to create stunning visuals and animations that make you feel like you are really riding a bike. You can see the details of your bike, the environment, and the other characters. You can also experience the effects of gravity, friction, inertia, and momentum on your bike's movement and behavior.</p>
69
- <h3>Customizable Bikes and Riders</h3>
70
- <p>Another feature of bike 3D games is their customizable bikes and riders. These games allow you to personalize your bike and rider to match your taste and personality. You can change the color, design, shape, size, and parts of your bike. You can also change the appearance, outfit, accessories, and skills of your rider. You can unlock new bikes and riders by earning coins, gems, stars, or trophies in the game.</p>
71
- <h3>Diverse Maps and Environments</h3>
72
- <p>A third feature of bike 3D games is their diverse maps and environments. These games offer you a variety of maps and locations to explore and enjoy with your bike. You can ride on different terrains, such as asphalt, dirt, sand, snow, or grass. You can also ride in different settings, such as urban, rural, desert, mountain, or forest. Each map and environment has its own challenges, obstacles, hazards, and secrets to discover.</p>
73
- <h3>Multiplayer and Online Modes</h3>
74
- <p>A fourth feature of bike 3D games is their multiplayer and online modes. These games enable you to play with other bikers from around the world or with your friends locally. You can join online races, tournaments, leagues, or clans. You can also chat with other players, send them messages, gifts, or challenges. You can also create your own custom maps and share them with other players.</p>
75
- <h2>How to Play Bike 3D Games</h2>
76
- <h3>Controls and Tips</h3>
77
- <p>The controls of bike 3D games vary depending on the game and the device you are using. However, most games use similar basic controls that are easy to learn and master. Here are some common controls and tips for playing bike 3D games:</p>
78
- <ul>
79
- <li>To accelerate or brake your bike, use the up or down arrow keys on your keyboard or the right or left pedals on your screen.</li>
80
- <li>To steer or balance your bike, use the left or right arrow keys on your keyboard or tilt your device left or right.</li>
81
- <li>To perform tricks or stunts with your bike, use the spacebar on your keyboard or tap the screen.</li>
82
- <li>To pause or resume the game, use the esc key on your keyboard or tap the pause button on your screen.</li>
83
- <li>To change the camera angle or view, use the C key on your keyboard or swipe the screen.</li>
84
- <li>To boost your speed or power, use the X key on your keyboard or tap the boost button on your screen.</li>
85
- <li>To customize your bike or rider, use the mouse on your computer or tap the menu button on your screen.</li>
86
- </ul>
87
- <p>Some tips for playing bike 3D games are:</p>
88
- <ul>
89
- <li>Practice before playing in competitive modes to improve your skills and confidence.</li>
90
- <li>Follow the instructions and hints given by the game to complete the objectives and missions.</li>
91
- <li>Collect coins, gems, stars, trophies, or other items along the way to unlock new bikes, riders, maps, or features.</li>
92
- <li>Avoid crashing into obstacles, hazards, or other bikers, as they will slow you down or damage your bike.</li>
93
- <li>Use the boost or power button wisely, as they have limited use and need time to recharge.</li>
94
- <li>Try different tricks and stunts to earn more points and impress the audience.</li>
95
- </ul>
96
- <h3>Tricks and Stunts</h3>
97
- <p>One of the most fun and rewarding aspects of bike 3D games is performing tricks and stunts with your bike. These are special maneuvers that involve flipping, spinning, jumping, or flying with your bike. They can increase your score, speed, or power, as well as make the game more exciting and enjoyable.</p>
98
- <p>There are many types of tricks and stunts that you can do with your bike, depending on the game and the map. Here are some common tricks and stunts that you can try:</p>
99
- <ul>
100
- <li>Wheelie: Lifting the front wheel of your bike and riding on the rear wheel only.</li>
101
- <li>Stoppie: Lifting the rear wheel of your bike and riding on the front wheel only.</li>
102
- <li>Bunny hop: Jumping with your bike without using a ramp or an obstacle.</li>
103
- <li>Backflip: Rotating your bike 360 degrees backward in the air.</li>
104
- <li>Frontflip: Rotating your bike 360 degrees forward in the air.</li>
105
- <li>Barrel roll: Rotating your bike 360 degrees sideways in the air.</li>
106
- <li>Tailwhip: Spinning your bike around your body in the air.</li>
107
- <li>No hander: Taking both hands off the handlebars in the air.</li>
108
- <li>No footer: Taking both feet off the pedals in the air.</li>
109
- <li>Superman: Stretching your body and legs behind your bike in the air.</li>
110
- </ul>
111
- <p>To perform tricks and stunts with your bike, you need to use the spacebar on your keyboard or tap the screen. You also need to use the arrow keys on your keyboard or tilt your device to control the direction and angle of your bike. You need to time your tricks and stunts well, as they require speed, height, and balance. You also need to land safely on your wheels, or else you will crash and lose points.</p>
112
- <h3>Challenges and Achievements</h3>
113
- <p>A final aspect of bike 3D games is completing challenges and achievements. These are specific goals or tasks that you need to accomplish in the game. They can range from simple to complex, easy to hard, or short to long. They can test your skills, knowledge, or endurance. They can also reward you with coins, gems, stars, trophies, or other items.</p>
114
- <p>There are many types of challenges and achievements that you can complete in bike 3D games, depending on the game and the mode. Here are some common challenges and achievements that you can try:</p>
115
- <ul>
116
- <li>Finish a race or a level in a certain time or position.</li>
117
- <li>Collect a certain number or type of items along the way.</li>
118
- <li>Perform a certain number or type of tricks or stunts.</li>
119
- <li>Avoid crashing or damaging your bike for a certain distance or duration.</li>
120
- <li>Catch or escape from a certain number or type of enemies or opponents.</li>
121
- <li>Unlock a certain number or type of bikes, riders, maps, or features.</li>
122
- <li>Earn a certain number or type of points, coins, gems, stars, trophies, or other items.</li>
123
- </ul>
124
- <p>To complete challenges and achievements in bike 3D games, you need to follow the instructions and hints given by the game. You also need to use your skills, strategies, and resources wisely. You need to be persistent and patient, as some challenges and achievements may take multiple attempts or sessions to complete. You also need to have fun and enjoy the process, as completing challenges and achievements can make you feel proud and satisfied.</p>
125
- <h2>Best Bike 3D Games to Try</h2>
126
- <h3>Moto X3M</h3>
127
- <p>Moto X3M is one of the most popular and addictive bike 3D games available online. It is a racing game that features over 20 levels of extreme biking action. You can ride through various terrains and environments, such as beaches, caves, forests, or snow. You can also perform amazing tricks and stunts along the way. You can unlock new bikes and riders by completing levels and earning stars. You can also compete with other players on leaderboards and achievements.</p>
128
- <h3>3D Moto Simulator 2</h3>
129
- <p>3D Moto Simulator 2 is another great bike 3D game that you can play online. It is a simulation game that lets you explore three different open-world maps with your bike. You can choose from different bikes, such as sports bikes, police bikes, or dirt bikes, and customize their appearance and performance. You can also perform various tricks and stunts with your bike. You can enjoy the realistic graphics and physics of the game. You can also interact with other players online or play with your friends locally.</p>
130
- <h3>Riding Extreme 3D</h3>
131
- <p>Riding Extreme 3D is a new and exciting bike 3D game that you can download on your mobile device. It is a racing game that lets you compete with other bikers on different tracks and courses. You can choose from different modes, such as career, quick race, or multiplayer. You can also upgrade your bike's engine, brakes, tires, or suspension. You can also perform stunning tricks and stunts with your bike. You can enjoy the smooth controls and the dynamic music of the game. You can also challenge your friends or other players online.</p>
132
- <h2>Conclusion</h2>
133
- <p>Bike 3D games are a type of video game that simulates bike riding in a three-dimensional environment. They are fun, exciting, and beneficial for your brain and skills. They offer you various types of bikes, features, modes, maps, and challenges to enjoy and explore. They also allow you to customize your bike and rider, perform tricks and stunts, and play with other players online or offline.</p>
134
- <p>If you are looking for a new and thrilling way to spend your free time, you should try playing a bike 3D game. You will not regret it. You will have a blast!</p>
135
- <p>So what are you waiting for? Grab your virtual bike and start riding!</p>
136
- <h2>FAQs</h2>
137
- <p>Here are some frequently asked questions about bike 3D games:</p>
138
- <ol>
139
- <li>What are the best devices to play bike 3D games on?</li>
140
- <p>The best devices to play bike 3D games on are computers or laptops with high-speed internet connection and good graphics card. You can also play bike 3D games on smartphones or tablets with touch screen and accelerometer.</p>
141
- <li>How much do bike 3D games cost?</li>
142
- <p>Some bike 3D games are free to play online or download on your device. Some bike 3D games may require a one-time purchase or a subscription fee to access all the features and content. Some bike 3D games may also have in-app purchases or ads to generate revenue.</p>
143
- <li>Are bike 3D games safe for kids?</li>
144
- <p>Most bike 3D games are safe for kids, as they do not contain violence, gore, or inappropriate language. However, some bike 3D games may have realistic crashes or injuries that may be disturbing for some kids. Some bike 3D games may also have online chat or social features that may expose kids to strangers or cyberbullying. Therefore, parents should supervise their kids when playing bike 3D games and set parental controls if needed.</p>
145
- <li>Are bike 3D games addictive?</li>
146
- <p>Bike 3D games can be addictive, as they are fun, challenging, and rewarding. They can also trigger the release of dopamine in the brain, which is a chemical that makes you feel happy and motivated. However, playing bike 3D games excessively can have negative effects on your physical and mental health, such as eye strain, headache, neck pain, back pain, insomnia, anxiety, depression, or isolation. Therefore, you should limit your playing time and take breaks regularly.</p>
147
- <li>How can I improve my skills in bike 3D games?</li>
148
- <p>You can improve your skills in bike 3D games by practicing regularly, learning from your mistakes, watching tutorials or videos of other players, reading tips and guides online, joining forums or communities of other players, asking for feedback or advice from other players, or playing with more experienced players.</p>
149
- </ol>
 
spaces/1phancelerku/anime-remove-background/Download Mod Truckers of Europe 3 The Best Truck Simulator Game Ever.md DELETED
@@ -1,115 +0,0 @@
1
-
2
- <h1>Download Mod Truckers of Europe 3: A Guide for Trucking Enthusiasts</h1>
3
- <p>If you love driving trucks across realistic European roads and delivering various cargoes, then you might have heard of Truckers of Europe 3, a popular truck simulator game for Android devices. But did you know that you can make your trucking experience even more fun and immersive by downloading mods for Truckers of Europe 3?</p>
4
- <p>Mods are modifications or additions to the original game that can change or improve various aspects of the gameplay, such as graphics, physics, sounds, vehicles, maps, traffic, weather, and more. In this article, we will show you how to download mods for Truckers of Europe 3, what are some of the benefits and risks of using mods, and what are some of the best mods that you can try right now. So buckle up and get ready to become the king of the road with Mod Truckers of Europe 3!</p>
5
- <h2>download mod truckers of europe 3</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash; <a href="https://jinyurl.com/2uNSf6">https://jinyurl.com/2uNSf6</a></b></p><br /><br />
6
- <h2>What are mods and how do they enhance your gaming experience?</h2>
7
- <p>Mods are short for modifications, which are changes or additions to the original game that can alter or enhance various aspects of the gameplay. Mods are usually created by fans or developers who want to customize or improve their gaming experience. Mods can range from simple tweaks to major overhauls, depending on the scope and complexity of the mod.</p>
8
- <h3>Definition and types of mods</h3>
9
- <p>There are many types of mods for Truckers of Europe 3, but they can be broadly categorized into two groups: cosmetic mods and gameplay mods. Cosmetic mods are mods that change the appearance or sound of the game, such as skins, textures, models, animations, music, sound effects, etc. Gameplay mods are mods that change the mechanics or features of the game, such as physics, vehicles, trailers, cargoes, maps, traffic, weather, missions, etc.</p>
10
- <h3>Benefits of using mods for Truckers of Europe 3</h3>
11
- <p>Using mods for Truckers of Europe 3 can have many benefits for your gaming experience. Some of the benefits are:</p>
12
- <ul>
13
- <li>You can customize your truck and trailer according to your preferences and style. You can choose from different colors, designs, logos, accessories, etc.</li>
14
- <li>You can drive more realistic and diverse trucks with different chassis configurations, engine sounds, interiors, etc.</li>
15
- <li>You can haul more challenging and varied cargoes with different weights, sizes, shapes, etc.</li>
16
- <li>You can explore new and detailed maps with different terrains, landmarks, roads, etc.</li>
17
- <li>You can experience more realistic and dynamic traffic with different vehicles, behaviors, rules, etc.</li>
18
- <li>You can enjoy different weather conditions and time cycles with realistic effects on visibility, traction, etc.</li>
19
- <li>You can have more fun and challenge with different missions and scenarios that test your skills and knowledge.</li>
20
- </ul>
21
- <h3>Risks and precautions of using mods for Truckers of Europe 3</h3>
22
- <p>Using mods for Truckers of Europe 3 can also have some risks and drawbacks for your gaming experience. Some of the risks are:</p>
23
- <ul>
24
- <li>You may encounter compatibility issues or conflicts between different mods or between mods and the original game. This may cause crashes, glitches, errors, etc.</li>
25
- <li>You may violate the terms of service or the intellectual property rights of the original game or the mod creators. This may result in legal actions, bans, or penalties.</li>
26
- <li>You may compromise the security or performance of your device by downloading mods from untrusted sources or by installing malicious software. This may result in data loss, malware infection, device damage, etc.</li>
27
- </ul>
28
- <p>To avoid or minimize these risks, you should take some precautions when using mods for Truckers of Europe 3. Some of the precautions are:</p>
29
- <ul>
30
- <li>You should always backup your game files and data before installing any mods. This way, you can restore your game to its original state if something goes wrong.</li>
31
- <li>You should only download mods from reputable and verified sources, such as official websites, forums, or app stores. You should also check the ratings, reviews, and comments of other users before downloading any mods.</li>
32
- <li>You should always read the description, instructions, and requirements of the mods carefully before installing them. You should also follow the installation steps correctly and use compatible versions of the game and the mods.</li>
33
- <li>You should not use too many mods at once or use mods that are incompatible with each other or with the original game. You should also disable or uninstall any mods that are causing problems or that you no longer need.</li>
34
- <li>You should respect the rights and credits of the original game and the mod creators. You should not claim ownership, distribute, or modify any mods without permission from the authors.</li>
35
- </ul>
36
- <h2>How to download mods for Truckers of Europe 3?</h2>
37
- <p>Downloading mods for Truckers of Europe 3 is not very difficult, but it may vary depending on the source and the type of the mod. Here are some general steps that you can follow to download and install mods for Truckers of Europe 3:</p>
38
- <ol>
39
- <li>Find a mod that you like from a reliable source, such as Mod Truckers of Europe 3, Truck Simulator Mods, or the Google Play Store.</li>
40
- <li>Download the mod file to your device. The mod file may be in different formats, such as APK, ZIP, RAR, etc.</li>
41
- <li>If the mod file is in APK format, you can simply tap on it and install it like any other app. If the mod file is in ZIP or RAR format, you need to extract it using a file manager app or a zip extractor app.</li>
42
- <li>After extracting the mod file, you will see a folder with the name of the mod. Inside this folder, you will find one or more files with extensions such as .scs, .zip, .rar, etc. These are the actual mod files that you need to copy or move to your game folder.</li>
43
- <li>To find your game folder, you need to go to your device's internal storage and look for a folder named Android/data/com.truckersofeurope3/files/mods. If you don't see this folder, you need to create it manually.</li>
44
- <li>Paste or move the mod files that you extracted earlier to this folder. Make sure that you don't change the names or extensions of these files (a scripted version of this step is sketched after this list).</li>
45
- <li>Launch your game and go to the settings menu. There you will see an option called "Mod Manager". Tap on it and you will see a list of all the mods that you have installed. You can enable or disable any mod by tapping on its name.</li>
46
- <li>Enjoy your game with your new mods!</li>
47
- </ol>
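- <p>As a rough illustration of steps 4 to 6 above, here is a minimal Python sketch that copies extracted .scs files into the game's mods folder. Both paths are placeholders, the /sdcard prefix varies by device, and the sketch assumes you can run Python against the device's storage (for example from a terminal app or a USB-mounted connection):</p>
- <pre><code>import shutil
- from pathlib import Path
- 
- # Placeholder paths -- adjust to where you extracted the mod and to
- # your device's actual storage mount point.
- extracted = Path("/sdcard/Download/my_mod")
- mods_dir = Path("/sdcard/Android/data/com.truckersofeurope3/files/mods")
- 
- mods_dir.mkdir(parents=True, exist_ok=True)
- for mod_file in extracted.glob("*.scs"):
-     # Copy without renaming, since the game expects the original file names.
-     shutil.copy2(mod_file, mods_dir / mod_file.name)
-     print(f"Installed {mod_file.name}")</code></pre>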
48
- <h2>What are some of the best mods for Truckers of Europe 3?</h2>
49
- <p>There are hundreds of mods for Truckers of Europe 3 that you can choose from, but some of them are more popular and recommended than others. Here are some of the best mods for Truckers of Europe 3 that you can try right now:</p>
90
- <table>
91
- <tr><th>Name</th><th>Description</th><th>Link</th></tr>
92
- <tr><td>Realistic Graphics Mod</td><td>This mod improves the graphics quality and realism of Truckers of Europe 3 by adding new textures, lighting effects, shadows, reflections, etc. It also enhances the weather system and adds realistic raindrops and fog effects.</td><td></td></tr>
93
- <tr><td>Realistic Physics Mod</td><td>This mod improves the physics and handling of Truckers of Europe 3 by adding new suspension settings, brake force settings, engine torque settings, etc. It also adds realistic tire wear and fuel consumption effects.</td><td></td></tr>
94
- <tr><td>Realistic Traffic Mod</td><td>This mod improves the traffic density and diversity of Truckers of Europe 3 by adding new vehicles, models, colors, behaviors, etc. It also adds realistic traffic rules and speed limits.</td><td></td></tr>
95
- <tr><td>Realistic Sound Mod</td><td>This mod improves the sound quality and realism of Truckers of Europe 3 by adding new engine sounds, horn sounds, brake sounds, etc. It also adds realistic ambient sounds, such as wind, rain, birds, etc.</td><td></td></tr>
96
- <tr><td>Realistic Truck Mod</td><td>This mod improves the truck variety and realism of Truckers of Europe 3 by adding new trucks, models, skins, interiors, etc. It also adds realistic truck features, such as dashboard indicators, mirrors, lights, etc.</td><td></td></tr>
97
- <tr><td>Realistic Trailer Mod</td><td>This mod improves the trailer variety and realism of Truckers of Europe 3 by adding new trailers, models, skins, cargoes, etc. It also adds realistic trailer features, such as coupling, weight distribution, etc.</td><td></td></tr>
98
- <tr><td>Realistic Map Mod</td><td>This mod improves the map size and realism of Truckers of Europe 3 by adding new regions, cities, roads, landmarks, etc. It also adds realistic map features, such as tolls, borders, signs, etc.</td><td></td></tr>
99
- </table>
100
- <h2>Conclusion: Enjoy the ultimate trucking simulation with Mod Truckers of Europe 3</h2>
101
- <p>In conclusion, Mod Truckers of Europe 3 is a great way to enhance your trucking experience and enjoy the ultimate truck simulator game for Android devices. By downloading mods for Truckers of Europe 3, you can customize and improve various aspects of the gameplay, such as graphics, physics, sounds, vehicles, trailers, maps, traffic, weather, missions, and more. You can also find and install mods easily from different sources and manage them with the mod manager feature in the game settings. However, you should also be aware of the risks and precautions of using mods for Truckers of Europe 3 and follow some tips and tricks to avoid or minimize any problems or issues. We hope that this article has helped you learn how to download mods for Truckers of Europe 3 and what are some of the best mods that you can try right now. So what are you waiting for? Download Mod Truckers of Europe 3 today and become the king of the road!</p>
102
- <h2>FAQs: Frequently Asked Questions about Mod Truckers of Europe 3</h2>
103
- <p>Here are some of the most common questions and answers about Mod Truckers of Europe 3:</p>
104
- <h4>Q: Do I need to root my device to use mods for Truckers of Europe 3?</h4>
105
- <p>A: No, you don't need to root your device to use mods for Truckers of Europe 3. You can simply download and install mods from different sources and copy or move them to your game folder.</p>
106
- <h4>Q: Will using mods for Truckers of Europe 3 affect my game progress or achievements?</h4>
107
- <p>A: No, using mods for Truckers of Europe 3 will not affect your game progress or achievements. You can still save your game data and unlock achievements as usual.</p>
108
- <h4>Q: How can I update or uninstall mods for Truckers of Europe 3?</h4>
109
- <p>A: To update or uninstall mods for Truckers of Europe 3, you need to go to your game folder and delete or replace the mod files that you want to update or uninstall. You can also use the mod manager feature in the game settings to enable or disable any mod.</p>
110
- <h4>Q: How can I report a bug or a problem with a mod for Truckers of Europe 3?</h4>
111
- <p>A: To report a bug or a problem with a mod for Truckers of Europe 3, you need to contact the mod creator directly through their website, forum , or email. You can also leave a comment or a review on the source where you downloaded the mod. You should provide as much information as possible, such as the mod name, version, description, screenshot, error message, etc.</p>
112
- <h4>Q: How can I create my own mod for Truckers of Europe 3?</h4>
113
- <p>A: To create your own mod for Truckers of Europe 3, you need to have some knowledge and skills in programming, modeling, texturing, sound editing, etc. You also need to have some tools and software, such as a text editor, a 3D modeling software, a sound editor, etc. You can find some tutorials and guides on how to create mods for Truckers of Europe 3 on the internet or on the official website of the game.</p> 197e85843d<br />
114
- <br />
115
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Q dana APK and Enjoy Low Interest Rates and Flexible Repayment Terms.md DELETED
@@ -1,131 +0,0 @@
1
-
2
- <h1>Download Q Dana APK: A Fast and Easy Loan Application for Indonesians</h1>
3
- <p>If you are looking for a quick and convenient way to get a loan in Indonesia, you might want to check out Q Dana. Q Dana is a loan application that offers cash loans online without any collateral or guarantee. You can borrow up to Rp8,000,000 with low interest rates and flexible repayment terms. All you need is your KTP, phone number, bank account, and stable income source. In this article, we will show you how to download Q Dana APK on your Android device, how to apply for a loan with Q Dana, what are the benefits of using Q Dana, and what are the requirements and terms of using Q Dana.</p>
4
- <h2>download q dana apk</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://jinyurl.com/2uNKcm">https://jinyurl.com/2uNKcm</a></b></p><br /><br />
5
- <h2>How to Download Q Dana APK on Your Android Device</h2>
6
- <p>Downloading Q Dana APK is very easy and fast. You can follow these simple steps:</p>
7
- <ol>
8
- <li>Go to the official website of Q Dana or APKCombo, where you can find the latest version of Q Dana APK.</li>
9
- <li>Click on the download button and choose the version you want. The file size is about <strong>6 MB</strong>.</li>
10
- <li>Allow unknown sources on your device settings. This will enable you to install apps from sources other than Google Play Store.</li>
11
- <li>Install the APK file and open the app. You will see the welcome screen of Q Dana.</li>
12
- </ol>
13
- <h2>How to Apply for a Loan with Q Dana</h2>
14
- <p>Applying for a loan with Q Dana is also very easy and fast. You can follow these simple steps:</p>
15
- <ol>
16
- <li>Register with your phone number and verify your identity with your KTP. You will need to take a selfie with your KTP and upload it to the app.</li>
17
- <li>Submit your personal data and choose the loan amount and tenure. You can borrow from Rp600,000 to Rp8,000,000 with a tenure from 91 to 360 days. You will see the interest rate, service fee, and total repayment amount before you confirm your loan application.</li>
18
- <li>Wait for the review and approval (usually within 10 minutes to 2 hours). You will receive a notification on your phone when your loan is approved.</li>
19
- <li>Withdraw your loan money to your bank account. You can choose from various banks supported by Q Dana, such as BCA, BNI, BRI, Mandiri, CIMB Niaga, and more. You will receive your money within minutes after you confirm your withdrawal.</li>
20
- </ol>
21
- <h2>What are the Benefits of Using Q Dana</h2>
22
- <p>Using Q Dana has many benefits for borrowers who need cash loans online. Here are some of the benefits:</p>
23
- <ul>
24
- <li><strong>Low interest rate and service fee</strong>. Q Dana offers a competitive interest rate of up to 0.077% per day (2.31% per month), which is lower than many other loan applications in Indonesia. The service fee is also reasonable and transparent, ranging from Rp20,000 to Rp200,000 depending on the loan amount and tenure.</li>
25
- <li><strong>No collateral or guarantee required</strong>. Q Dana does not require any collateral or guarantee from borrowers. You only need to provide your KTP, phone number, bank account, and income source to apply for a loan.</li>
26
- <li><strong>Secure and reliable service with KSP supervision and data protection</strong>. Q Dana is supervised by the Indonesian Cooperative Supervisory Agency (KSP), which ensures that Q Dana complies with the regulations and standards of the cooperative sector. Q Dana also protects your personal data and privacy with encryption and security measures.</li>
27
- <li><strong>Fast and easy approval for repeat borrowers</strong>. Q Dana rewards loyal customers with faster and easier approval for repeat loans. If you have a good repayment history with Q Dana, you can get approved within minutes and enjoy higher loan amounts and longer tenures.</li>
28
- </ul>
29
- <h2>What are the Requirements and Terms of Using Q Dana</h2>
30
- <p>Using Q Dana also has some requirements and terms that you need to meet and follow. Here are some of the requirements and terms:</p>
31
- <table>
32
- <tr>
33
- <th>Requirement</th>
34
- <th>Description</th>
35
- </tr>
36
- <tr>
37
- <td><strong>Indonesian citizen with a valid KTP</strong></td>
38
- <td>You must be an Indonesian citizen with a valid KTP to apply for a loan with Q Dana. You will need to upload your KTP and take a selfie with it to verify your identity.</td>
39
- </tr>
40
- <tr>
41
- <td><strong>Age between 20 and 55 years old</strong></td>
42
- <td>You must be between 20 and 55 years old to apply for a loan with Q Dana. You will need to provide your date of birth on your personal data.</td>
43
- </tr>
44
- <tr>
45
- <td><strong>Active phone number and bank account</strong></td>
46
- <td>You must have an active phone number and bank account to apply for a loan with Q Dana. You will need to register with your phone number and choose your bank account for withdrawal.</td>
47
- </tr>
48
- <tr>
49
- <td><strong>Stable income source</strong></td>
50
- <td>You must have a stable income source to apply for a loan with Q Dana. You will need to provide information about your income source, such as your occupation, employer, salary, etc.</td>
51
- </tr>
52
- </table>
53
- <table>
54
- <tr>
55
- <th>Term</th>
56
- <th>Description</th>
57
- </tr>
58
- <tr>
59
- <td><strong>Loan amount from Rp600,000 to Rp8,000,000</strong></td>
60
- <td>You can borrow from Rp600,000 to Rp8,000,000 with Q Dana. The loan amount depends on your credit score, income source, repayment history, etc.</td>
61
- </tr>
62
- <tr>
63
- <td><strong>Loan tenure from 91 to 360 days</strong></td>
64
- <td>You can choose from 91 to 360 days for your loan tenure with Q Dana. The loan tenure depends on your loan amount, interest rate, service fee, etc.</td>
65
- </tr>
66
- <tr>
67
- <td><strong>Interest rate up to 0.077% per day (2.31% per month)</strong></td>
68
- <td>You will be charged an interest rate of up to 0.077% per day (2.31% per month) for your loan with Q Dana. The interest rate depends on your credit score, income source, repayment history, etc. A worked example follows this table.</td>
69
- </tr>
- </table>
70
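- <p>For a concrete sense of the terms above, here is a small, hypothetical Python calculation. It assumes simple (non-compounding) daily interest, which is an assumption rather than Q Dana's published formula, and it uses the low end of the quoted service-fee range:</p>
- <pre><code># Sample figures only -- not an official Q Dana quotation.
- principal = 1_000_000      # Rp, sample loan amount
- daily_rate = 0.00077       # 0.077% per day (the quoted maximum rate)
- tenure_days = 91           # the shortest tenure offered
- service_fee = 20_000       # Rp, low end of the quoted Rp20,000-Rp200,000 range
- 
- interest = principal * daily_rate * tenure_days
- total_repayment = principal + interest + service_fee
- print(f"Interest:  Rp{interest:,.0f}")        # Rp70,070
- print(f"Total due: Rp{total_repayment:,.0f}") # Rp1,090,070</code></pre>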
- <h2>Conclusion and FAQs</h2>
71
- <p>In conclusion, Q Dana is a fast and easy loan application for Indonesians that offers cash loans online without any collateral or guarantee. You can download Q Dana APK on your Android device and apply for a loan with just your KTP, phone number, bank account, and income source. You can enjoy low interest rates, flexible repayment terms, secure and reliable service, and fast and easy approval with Q Dana. If you need a quick and convenient way to get a loan in Indonesia, you should download Q Dana APK today. Here are some frequently asked questions (FAQs) about Q Dana:</p>
- <h3>FAQ 1: What is Q Dana?</h3>
72
- <p>Q Dana is a loan application that offers cash loans online for Indonesians. You can borrow up to Rp8,000,000 with low interest rates and flexible repayment terms. You do not need any collateral or guarantee to apply for a loan with Q Dana.</p>
73
- <h3>FAQ 2: How can I download Q Dana APK?</h3>
74
- <p>You can download Q Dana APK on your Android device by going to the official website of Q Dana or APKCombo, where you can find the latest version of Q Dana APK. You can click on the download button and choose the version you want. You will need to allow unknown sources on your device settings and install the APK file.</p>
124
- <h3>FAQ 3: How can I apply for a loan with Q Dana?</h3>
125
- <p>You can apply for a loan with Q Dana by registering with your phone number and verifying your identity with your KTP. You will need to submit your personal data and choose the loan amount and tenure. You will wait for the review and approval, which usually takes 10 minutes to 2 hours. You will withdraw your loan money to your bank account.</p>
126
- <h3>FAQ 4: What are the benefits of using Q Dana?</h3>
127
- <p>Using Q Dana has many benefits, such as low interest rate and service fee, no collateral or guarantee required, secure and reliable service with KSP supervision and data protection, and fast and easy approval for repeat borrowers.</p>
128
- <h3>FAQ 5: What are the requirements and terms of using Q Dana?</h3>
129
- <p>Using Q Dana has some requirements and terms, such as being an Indonesian citizen with a valid KTP, being between 20 and 55 years old, having an active phone number and bank account, having a stable income source, borrowing from Rp600,000 to Rp8,000,000, choosing a loan tenure from 91 to 360 days, and paying up to 0.077% per day (2.31% per month) in interest.</p>
 
spaces/1phancelerku/anime-remove-background/Explore the Secrets of Evolution with Dino World Jurassic Builder 2 MOD APK.md DELETED
@@ -1,94 +0,0 @@
2
- <h1>Dino World Jurassic Builder 2 Mod Apk Revdl: How to Build Your Own Dinosaur Park</h1>
3
- <p>If you are a fan of dinosaurs and park building games, you will love dino world jurassic builder 2. This is a free-to-play game that lets you create your own prehistoric park filled with dinosaurs. You can breed, feed, train, and fight with your dinosaurs in this exciting game. You can also explore different environments and discover new species of dinosaurs.</p>
4
- <h2>Features of Dino World Jurassic Builder 2</h2>
5
- <p>Dino world jurassic builder 2 has many features that make it a fun and addictive game. Here are some of them:</p>
6
- <h2>dino world jurassic builder 2 mod apk revdl</h2><br /><p><b><b>Download Zip</b> &middot;&middot;&middot;&middot;&middot; <a href="https://jinyurl.com/2uNPtU">https://jinyurl.com/2uNPtU</a></b></p><br /><br />
7
- <ul>
8
- <li>Over 12 elements of dinosaurs to collect, each with unique personality, powers, and skills</li>
9
- <li>A breeding lab where you can crossbreed your dinosaurs and create new ones</li>
10
- <li>A food farm where you can grow food for your dinosaurs</li>
11
- <li>A battle arena where you can challenge other players and win prizes</li>
12
- <li>A social area where you can connect with your friends and share your park</li>
13
- <li>A decor area where you can customize your park with stylish decorations</li>
14
- <li>A research center where you can unlock new buildings, medicines, and upgrades</li>
15
- <li>An expedition center where you can send teams to find fossils and DNA</li>
16
- </ul>
17
- <p>To play the game, you need to build enclosures for your dinosaurs and provide them with food, water, and terrain. You also need to manage your power supply, staff, guests, and finances. You need to keep your dinosaurs happy and healthy, as well as prevent them from escaping or causing trouble.</p>
18
- <h2>What is a Mod Apk and How to Download It from Revdl</h2>
19
- <p>A mod apk is a modified version of an original app that gives you access to extra features that are not available in the official version. For example, a mod apk may give you unlimited money, gems, resources, or unlock all levels.</p>
20
- <p>Revdl is a website that provides mod apks for various games and apps. You can download dino world jurassic builder 2 mod apk from revdl by following these steps:</p>
21
- <ol>
22
- <li>Go to revdl.com and search for dino world jurassic builder 2 mod apk</li>
23
- <li>Select the latest version of the mod apk and click on the download link</li>
24
- <li>Wait for the download to finish and then locate the file on your device</li>
25
- <li>Enable unknown sources on your device settings to allow installation of apps from outside sources</li>
26
- <li>Tap on the file and follow the instructions to install the mod apk</li>
27
- <li>Launch the game and enjoy the mod features</li>
28
- </ol>
29
- <h2>Benefits of Using Dino World Jurassic Builder 2 Mod Apk</h2>
30
- <p>Using dino world jurassic builder 2 mod apk has many benefits that will enhance your gaming experience. Here are some of them:</p>
31
- <ul>
32
- <li>You will get unlimited money and gems that you can use to buy anything in the game</li>
33
- <li>You will get unlimited food and resources that you can use to feed and upgrade your dinosaurs</li>
34
- <li>You will get unlimited DNA and fossils that you can use to breed and research new dinosaurs</li>
35
- <li>You will get all levels unlocked so you can play any stage you want</li>
36
- <li>You will get all dinosaurs unlocked so you can collect and use any dinosaur you want</li>
37
- <li>You will get all buildings unlocked so you can build any facility you want</li>
38
- <li>You will get all decorations unlocked so you can beautify your park as you wish</li>
39
- <li>You will get no ads so you can play without interruptions or distractions</li>
- </ul>
- <h2>Conclusion</h2>
40
- <p>Dino world jurassic builder 2 is a game that will appeal to anyone who loves dinosaurs and park building games. You can create your own dinosaur park and enjoy various activities with your dinosaurs. You can also download the mod apk from revdl and get access to unlimited features that will make your game more fun and easy. If you are looking for a game that combines creativity, strategy, and adventure, you should try dino world jurassic builder 2 mod apk revdl.</p>
41
- <h2>FAQs</h2>
42
- <p>Here are some frequently asked questions about the game and the mod apk:</p>
43
- <h3>Is dino world jurassic builder 2 mod apk safe to use?</h3>
44
- <p>Yes, the mod apk is safe to use as long as you download it from a trusted source like revdl. You should also scan the file with an antivirus before installing it. However, you should be aware that using the mod apk may violate the terms and conditions of the game and may result in your account being banned or suspended.</p>
45
- <h3>How do I update the mod apk?</h3>
46
- <p>To update the mod apk, you need to visit revdl and download the latest version of the mod apk. You can then install it over the existing one or uninstall the old one first. You should also backup your game data before updating to avoid losing your progress.</p>
87
- <h3>How do I back up my game data?</h3>
- <p>To back up your game data, use a cloud service such as Google Play Games or Facebook to sync the game with your account, or use a file manager app to copy the game's data folder from your device storage to another location.</p>
- <h3>How do I restore my game data?</h3>
- <p>To restore your game data, use the same cloud service or file manager app you used for the backup: sync the game with your account, or copy the game's data folder back to your device storage.</p>
- <h3>How do I contact the developer of the game?</h3>
- <p>To contact the developer, visit their official website or social media pages. You can also email them at [email protected] or use the feedback option in the game settings.</p>
spaces/801artistry/RVC801/train/mel_processing.py DELETED
@@ -1,130 +0,0 @@
1
- import torch
2
- import torch.utils.data
3
- from librosa.filters import mel as librosa_mel_fn
4
-
5
-
6
- MAX_WAV_VALUE = 32768.0
7
-
8
-
9
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
10
- """
11
- PARAMS
12
- ------
13
- C: compression factor
14
- """
15
- return torch.log(torch.clamp(x, min=clip_val) * C)
16
-
17
-
18
- def dynamic_range_decompression_torch(x, C=1):
19
- """
20
- PARAMS
21
- ------
22
- C: compression factor used to compress
23
- """
24
- return torch.exp(x) / C
25
-
26
-
27
- def spectral_normalize_torch(magnitudes):
28
- return dynamic_range_compression_torch(magnitudes)
29
-
30
-
31
- def spectral_de_normalize_torch(magnitudes):
32
- return dynamic_range_decompression_torch(magnitudes)
33
-
34
-
35
- # Reusable banks
36
- mel_basis = {}
37
- hann_window = {}
38
-
39
-
40
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
41
- """Convert waveform into Linear-frequency Linear-amplitude spectrogram.
42
-
43
- Args:
44
- y :: (B, T) - Audio waveforms
45
- n_fft
46
- sampling_rate
47
- hop_size
48
- win_size
49
- center
50
- Returns:
51
- :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
52
- """
53
- # Validation
54
- if torch.min(y) < -1.07:
55
- print("min value is ", torch.min(y))
56
- if torch.max(y) > 1.07:
57
- print("max value is ", torch.max(y))
58
-
59
- # Window - Cache if needed
60
- global hann_window
61
- dtype_device = str(y.dtype) + "_" + str(y.device)
62
- wnsize_dtype_device = str(win_size) + "_" + dtype_device
63
- if wnsize_dtype_device not in hann_window:
64
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
65
- dtype=y.dtype, device=y.device
66
- )
67
-
68
- # Padding
69
- y = torch.nn.functional.pad(
70
- y.unsqueeze(1),
71
- (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
72
- mode="reflect",
73
- )
74
- y = y.squeeze(1)
75
-
76
- # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2)
77
- spec = torch.stft(
78
- y,
79
- n_fft,
80
- hop_length=hop_size,
81
- win_length=win_size,
82
- window=hann_window[wnsize_dtype_device],
83
- center=center,
84
- pad_mode="reflect",
85
- normalized=False,
86
- onesided=True,
87
- return_complex=False,
88
- )
89
-
90
- # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
91
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
92
- return spec
93
-
94
-
95
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
96
- # MelBasis - Cache if needed
97
- global mel_basis
98
- dtype_device = str(spec.dtype) + "_" + str(spec.device)
99
- fmax_dtype_device = str(fmax) + "_" + dtype_device
100
- if fmax_dtype_device not in mel_basis:
101
- mel = librosa_mel_fn(
102
- sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
103
- )
104
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
105
- dtype=spec.dtype, device=spec.device
106
- )
107
-
108
- # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame)
109
- melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
110
- melspec = spectral_normalize_torch(melspec)
111
- return melspec
112
-
113
-
114
- def mel_spectrogram_torch(
115
- y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
116
- ):
117
- """Convert waveform into Mel-frequency Log-amplitude spectrogram.
118
-
119
- Args:
120
- y :: (B, T) - Waveforms
121
- Returns:
122
- melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
123
- """
124
- # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
125
- spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
126
-
127
- # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
128
- melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
129
-
130
- return melspec
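For context, `mel_spectrogram_torch` is this module's one-stop entry point from raw audio to log-mel features. A minimal sketch of driving it, assuming the deleted file is still importable as `mel_processing` (all parameter values below are illustrative, not RVC's canonical settings):

```python
import torch

from mel_processing import mel_spectrogram_torch  # assuming the file above is on the path

# Two 1-second mono waveforms at 16 kHz, amplitudes in [-1, 1]
y = torch.rand(2, 16000) * 2 - 1

mel = mel_spectrogram_torch(
    y,
    n_fft=1024,
    num_mels=80,
    sampling_rate=16000,
    hop_size=160,
    win_size=1024,
    fmin=0,
    fmax=8000,
)
print(mel.shape)  # torch.Size([2, 80, 100]): (batch, mel bins, frames)
```

The module-level `mel_basis`/`hann_window` dicts cache one filterbank and window per dtype/device pair, so repeated calls in a training loop avoid rebuilding them.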
spaces/A00001/bingothoo/src/components/chat-history.tsx DELETED
@@ -1,48 +0,0 @@
1
- import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons"
2
-
3
- export function ChatHistory() {
4
- return (
5
- <div className="chat-history fixed top-18 right-4">
6
- <div className="chat-history-header text-sm font-semibold text-left w-[280px] px-4 py-6">
7
- History
8
- </div>
9
- <div className="chat-history-main">
10
- <div className="scroller">
11
- <div className="surface">
12
- <div className="threads">
13
- <div className="thread">
14
- <div className="primary-row">
15
- <button type="button" aria-label="Load chat">
16
-
17
- </button>
18
- <div className="description">
19
- <h3 className="name">Untitled chat</h3>
20
- </div>
21
- <h4 className="time">1:42 AM</h4>
22
- <div className="controls">
23
-
24
- <button className="edit icon-button" type="button" aria-label="Rename">
25
- <IconEdit />
26
- </button>
27
-
28
- <button className="delete icon-button" type="button" aria-label="Delete">
29
- <IconTrash />
30
- </button>
31
-
32
- <button className="more icon-button" type="button" aria-haspopup="true" aria-expanded="false" aria-label="More">
33
- <IconMore />
34
- </button>
35
-
36
- <button className="export icon-button" type="button" aria-label="Export">
37
- <IconDownload />
38
- </button>
39
- </div>
40
- </div>
41
- </div>
42
- </div>
43
- </div>
44
- </div>
45
- </div>
46
- </div>
47
- )
48
- }
spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/index.html DELETED
@@ -1,113 +0,0 @@
1
- <!DOCTYPE html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
9
- <script>mermaid.initialize({startOnLoad:true});</script>
10
- </head>
11
- <body>
12
-
13
- <iframe
14
- src="https://awacke1-biomed-nlp-ai-clinical-terminolo-41aa105.hf.space"
15
- frameborder="0"
16
- width="1024"
17
- height="3600"
18
- ></iframe>
19
-
20
- <iframe
21
- src="https://awacke1-hedis-roster-dash-component-sdoh.hf.space"
22
- frameborder="0"
23
- width="1024"
24
- height="2048"
25
- ></iframe>
26
- <iframe
27
- src="https://awacke1-health-assessments-summarizer.hf.space"
28
- frameborder="0"
29
- width="1024"
30
- height="2048"
31
- ></iframe>
32
- <iframe
33
- src="https://awacke1-hedis-dash-component-top-clinica-6a4a58c.hf.space"
34
- frameborder="0"
35
- width="1024"
36
- height="3600"
37
- ></iframe>
38
-
39
- <iframe
40
- src="https://awacke1-twitter-sentiment-live-realtime.hf.space"
41
- frameborder="0"
42
- width="850"
43
- height="1024"
44
- ></iframe>
45
-
46
- <iframe
47
- src="https://awacke1-streamlitwikipediachat.hf.space"
48
- frameborder="0"
49
- width="850"
50
- height="1024"
51
- ></iframe>
52
-
53
- <iframe
54
- src="https://awacke1-cognitive-ai-episodic-semantic-m-f4b3d67.hf.space"
55
- frameborder="0"
56
- width="850"
57
- height="1024"
58
- ></iframe>
59
-
60
-
61
-
62
- <div class="mermaid">
63
- journey
64
- title Create AI
65
- section Training
66
- Format DataSet Inputs Files, Data Splits: 5: Teacher
67
- Model Build w/ SKLearn, TF, Pytorch: 3: Student
68
- Determine Model Performance: 1: Teacher, Student
69
- section Deploy
70
- Web Deploy Local and Cloud: 5: Teacher
71
- Architecture Spaces Gradio Streamlit Heroku AWS Azure and GCCP: 5: Teacher
72
- section Testing
73
- Test Model with Input Datasets: 5: Teacher
74
- Examples. Inputs that Work, Inputs That Break Model: 5: Teacher
75
- Governance - Analyze, Publish Fairness, Equity, Bias for Datasets and Outputs: 5: Teacher
76
- </div>
77
-
78
- <div class="mermaid">
79
- sequenceDiagram
80
- participant Alice
81
- participant Bob
82
- Alice->>John: Hello John, how are you?
83
- loop Healthcheck
84
- John->>John: Fight against hypochondria
85
- end
86
- Note right of John: Rational thoughts<br/>prevail...
87
- John-->>Alice: Great!
88
- John->>Bob: How about you?
89
- Bob-->>John: Jolly good!
90
- </div>
91
-
92
- <div class="card">
93
- <h1>Welcome to the Mermaid Modeler Tip Sheet</h1>
94
- <p>
95
- You can use Mermaid inside HTML5 by including the script and a div with the class "mermaid".
96
- </p>
97
- <p>
98
- Documentation is located here:
99
- <a href="https://mermaid.js.org/syntax/flowchart.html" target="_blank"
100
- >Mermaid documentation</a
101
- >.
102
- </p>
103
- </div>
104
-
105
-
106
- Links:
107
- https://huggingface.co/spaces/awacke1/HEDIS.Roster.Dash.Component.Service
108
- https://huggingface.co/spaces/awacke1/HEDIS.Roster.Dash.Component.SDOH
109
- https://huggingface.co/spaces/awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary
110
-
111
-
112
- </body>
113
- </html>
spaces/AIatUIUC/CodeLATS/lats/utils.py DELETED
@@ -1,73 +0,0 @@
1
- import os
2
- import gzip
3
- import json
4
- import openai
5
- import jsonlines
6
-
7
- from typing import List
8
-
9
- openai.api_key = os.getenv("OPENAI_API_KEY")
10
-
11
- def make_printv(verbose: bool):
12
- def print_v(*args, **kwargs):
13
- if verbose:
14
- kwargs["flush"] = True
15
- print(*args, **kwargs)
16
- else:
17
- pass
18
- return print_v
19
-
20
-
21
- def read_jsonl(path: str) -> List[dict]:
22
- if not os.path.exists(path):
23
- raise FileNotFoundError(f"File `{path}` does not exist.")
24
- elif not path.endswith(".jsonl"):
25
- raise ValueError(f"File `{path}` is not a jsonl file.")
26
- items = []
27
- with jsonlines.open(path) as reader:
28
- for item in reader:
29
- items += [item]
30
- return items
31
-
32
-
33
- def write_jsonl(path: str, data: List[dict], append: bool = False):
34
- with jsonlines.open(path, mode='a' if append else 'w') as writer:
35
- for item in data:
36
- writer.write(item)
37
-
38
-
39
- def read_jsonl_gz(path: str) -> List[dict]:
40
- if not path.endswith(".jsonl.gz"):
41
- raise ValueError(f"File `{path}` is not a jsonl.gz file.")
42
- with gzip.open(path, "rt") as f:
43
- data = [json.loads(line) for line in f]
44
- return data
45
-
46
-
47
- # generator that returns the item and the index in the dataset.
48
- # if the results_path exists, it will skip all items that have been processed
49
- # before.
50
- def enumerate_resume(dataset, results_path):
51
- if not os.path.exists(results_path):
52
- for i, item in enumerate(dataset):
53
- yield i, item
54
- else:
55
- count = 0
56
- with jsonlines.open(results_path) as reader:
57
- for item in reader:
58
- count += 1
59
-
60
- for i, item in enumerate(dataset):
61
- # skip items that have been processed before
62
- if i < count:
63
- continue
64
- yield i, item
65
-
66
-
67
- def resume_success_count(dataset) -> int:
68
- count = 0
69
- for item in dataset:
70
- if "is_solved" in item and item["is_solved"]:
71
- count += 1
72
- return count
73
-
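These helpers are small but easy to misread; a quick round-trip showing how `enumerate_resume` skips work already recorded (a sketch, assuming the file above is importable as `utils`):

```python
from utils import write_jsonl, read_jsonl, enumerate_resume  # the module above

dataset = [{"task_id": i} for i in range(5)]

# Record one finished item: enumerate_resume skips as many leading items
# as there are lines already present in the results file.
write_jsonl("results.jsonl", [{"task_id": 0, "is_solved": True}])
print(read_jsonl("results.jsonl"))  # [{'task_id': 0, 'is_solved': True}]

for i, item in enumerate_resume(dataset, "results.jsonl"):
    print(i, item)  # yields indices 1..4 only
```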
spaces/AMR-KELEG/ALDi/app.py DELETED
@@ -1,170 +0,0 @@
1
- # Hint: this cheatsheet is magic! https://cheat-sheet.streamlit.app/
2
- import constants
3
- import pandas as pd
4
- import streamlit as st
5
- import matplotlib.pyplot as plt
6
- from transformers import BertForSequenceClassification, AutoTokenizer
7
-
8
- import altair as alt
9
- from altair import X, Y, Scale
10
- import base64
11
-
12
- import re
13
-
14
-
15
- def preprocess_text(arabic_text):
16
- """Apply preprocessing to the given Arabic text.
17
-
18
- Args:
19
- arabic_text: The Arabic text to be preprocessed.
20
-
21
- Returns:
22
- The preprocessed Arabic text.
23
- """
24
- no_urls = re.sub(
25
- r"(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b",
26
- "",
27
- arabic_text,
28
- flags=re.MULTILINE,
29
- )
30
- no_english = re.sub(r"[a-zA-Z]", "", no_urls)
31
-
32
- return no_english
33
-
34
-
35
- @st.cache_data
36
- def render_svg(svg):
37
- """Renders the given svg string."""
38
- b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
39
- html = rf'<p align="center"> <img src="data:image/svg+xml;base64,{b64}"/> </p>'
40
- c = st.container()
41
- c.write(html, unsafe_allow_html=True)
42
-
43
-
44
- @st.cache_data
45
- def convert_df(df):
46
- # IMPORTANT: Cache the conversion to prevent computation on every rerun
47
- return df.to_csv(index=None).encode("utf-8")
48
-
49
-
50
- @st.cache_resource
51
- def load_model(model_name):
52
- model = BertForSequenceClassification.from_pretrained(model_name)
53
- return model
54
-
55
-
56
- tokenizer = AutoTokenizer.from_pretrained(constants.MODEL_NAME)
57
- model = load_model(constants.MODEL_NAME)
58
-
59
-
60
- def compute_ALDi(sentences):
61
- """Computes the ALDi score for the given sentences.
62
-
63
- Args:
64
- sentences: A list of Arabic sentences.
65
-
66
- Returns:
67
- A list of ALDi scores for the given sentences.
68
- """
69
- progress_text = "Computing ALDi..."
70
- my_bar = st.progress(0, text=progress_text)
71
-
72
- BATCH_SIZE = 4
73
- output_logits = []
74
-
75
- preprocessed_sentences = [preprocess_text(s) for s in sentences]
76
-
77
- for first_index in range(0, len(preprocessed_sentences), BATCH_SIZE):
78
- inputs = tokenizer(
79
- preprocessed_sentences[first_index : first_index + BATCH_SIZE],
80
- return_tensors="pt",
81
- padding=True,
82
- )
83
- outputs = model(**inputs).logits.reshape(-1).tolist()
84
- output_logits = output_logits + [max(min(o, 1), 0) for o in outputs]
85
- my_bar.progress(
86
- min((first_index + BATCH_SIZE) / len(preprocessed_sentences), 1),
87
- text=progress_text,
88
- )
89
- my_bar.empty()
90
- return output_logits
91
-
92
-
93
- render_svg(open("assets/ALDi_logo.svg").read())
94
-
95
- tab1, tab2 = st.tabs(["Input a Sentence", "Upload a File"])
96
-
97
- with tab1:
98
- sent = st.text_input(
99
- "Arabic Sentence:", placeholder="Enter an Arabic sentence.", on_change=None
100
- )
101
-
102
- # TODO: Check if this is needed!
103
- clicked = st.button("Submit")
104
-
105
- if sent:
106
- ALDi_score = compute_ALDi([sent])[0]
107
-
108
- ORANGE_COLOR = "#FF8000"
109
- fig, ax = plt.subplots(figsize=(8, 1))
110
- fig.patch.set_facecolor("none")
111
- ax.set_facecolor("none")
112
-
113
- ax.spines["left"].set_color(ORANGE_COLOR)
114
- ax.spines["bottom"].set_color(ORANGE_COLOR)
115
- ax.tick_params(axis="x", colors=ORANGE_COLOR)
116
-
117
- ax.spines[["right", "top"]].set_visible(False)
118
-
119
- ax.barh(y=[0], width=[ALDi_score], color=ORANGE_COLOR)
120
- ax.set_xlim(0, 1)
121
- ax.set_ylim(-1, 1)
122
- ax.set_title(f"ALDi score is: {round(ALDi_score, 3)}", color=ORANGE_COLOR)
123
- ax.get_yaxis().set_visible(False)
124
- ax.set_xlabel("ALDi score", color=ORANGE_COLOR)
125
- st.pyplot(fig)
126
-
127
- print(sent)
128
- with open("logs.txt", "a") as f:
129
- f.write(sent + "\n")
130
-
131
- with tab2:
132
- file = st.file_uploader("Upload a file", type=["txt"])
133
- if file is not None:
134
- df = pd.read_csv(file, sep="\t", header=None)
135
- df.columns = ["Sentence"]
136
- df.reset_index(drop=True, inplace=True)
137
-
138
- # TODO: Run the model
139
- df["ALDi"] = compute_ALDi(df["Sentence"].tolist())
140
-
141
- # A horizontal rule
142
- st.markdown("""---""")
143
-
144
- chart = (
145
- alt.Chart(df.reset_index())
146
- .mark_area(color="darkorange", opacity=0.5)
147
- .encode(
148
- x=X(field="index", title="Sentence Index"),
149
- y=Y("ALDi", scale=Scale(domain=[0, 1])),
150
- )
151
- )
152
- st.altair_chart(chart.interactive(), use_container_width=True)
153
-
154
- col1, col2 = st.columns([4, 1])
155
-
156
- with col1:
157
- # Display the output
158
- st.table(
159
- df,
160
- )
161
-
162
- with col2:
163
- # Add a download button
164
- csv = convert_df(df)
165
- st.download_button(
166
- label=":file_folder: Download predictions as CSV",
167
- data=csv,
168
- file_name="ALDi_scores.csv",
169
- mime="text/csv",
170
- )
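Outside of Streamlit, the scoring path above reduces to a few lines. A rough sketch, where `MODEL_NAME` is a placeholder for whatever `constants.MODEL_NAME` points to (a BERT regression head whose single logit is clamped to [0, 1]):

```python
from transformers import AutoTokenizer, BertForSequenceClassification

MODEL_NAME = "..."  # placeholder: see constants.MODEL_NAME in the app above
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = BertForSequenceClassification.from_pretrained(MODEL_NAME)

sentences = ["..."]  # Arabic sentences, preprocessed as in preprocess_text()
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
scores = model(**inputs).logits.reshape(-1).tolist()
scores = [max(min(s, 1.0), 0.0) for s in scores]  # clamp to [0, 1] as the app does
```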
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_cifar.py DELETED
@@ -1,16 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='ImageClassifier',
4
- backbone=dict(
5
- type='ResNet_CIFAR',
6
- depth=50,
7
- num_stages=4,
8
- out_indices=(3, ),
9
- style='pytorch'),
10
- neck=dict(type='GlobalAveragePooling'),
11
- head=dict(
12
- type='LinearClsHead',
13
- num_classes=10,
14
- in_channels=2048,
15
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
16
- ))
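A hedged sketch of how such a config is typically consumed in mmpretrain 1.x; in practice this `_base_` file is first merged into a full config, and exact registry calls may differ by version:

```python
from mmengine.config import Config
from mmpretrain.registry import MODELS
from mmpretrain.utils import register_all_modules

register_all_modules()  # registers ResNet_CIFAR etc. with the MODELS registry

cfg = Config.fromfile("resnet50_cifar.py")
classifier = MODELS.build(cfg.model)  # ImageClassifier with a ResNet_CIFAR-50 backbone
print(type(classifier).__name__)
```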
spaces/Ababababababbababa/poetry/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Arabic Poetry Generator
3
- emoji: 🐠
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.6
8
- app_file: app.py
9
- pinned: true
10
- license: cc-by-nc-4.0
11
- duplicated_from: Aaaaaaaabdualh/poetry
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/comet_utils.py DELETED
@@ -1,166 +0,0 @@
1
- import logging
2
- import os
3
- from urllib.parse import urlparse
4
-
5
- try:
6
- import comet_ml
7
- except (ModuleNotFoundError, ImportError):
8
- comet_ml = None
9
-
10
- import yaml
11
-
12
- logger = logging.getLogger(__name__)
13
-
14
- COMET_PREFIX = "comet://"
15
- COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
16
- COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv(
17
- "COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt"
18
- )
19
-
20
-
21
- def download_model_checkpoint(opt, experiment):
22
- model_dir = f"{opt.project}/{experiment.name}"
23
- os.makedirs(model_dir, exist_ok=True)
24
-
25
- model_name = COMET_MODEL_NAME
26
- model_asset_list = experiment.get_model_asset_list(model_name)
27
-
28
- if len(model_asset_list) == 0:
29
- logger.error(
30
- f"COMET ERROR: No checkpoints found for model name : {model_name}"
31
- )
32
- return
33
-
34
- model_asset_list = sorted(
35
- model_asset_list,
36
- key=lambda x: x["step"],
37
- reverse=True,
38
- )
39
- logged_checkpoint_map = {
40
- asset["fileName"]: asset["assetId"] for asset in model_asset_list
41
- }
42
-
43
- resource_url = urlparse(opt.weights)
44
- checkpoint_filename = resource_url.query
45
-
46
- if checkpoint_filename:
47
- asset_id = logged_checkpoint_map.get(checkpoint_filename)
48
- else:
49
- asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
50
- checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
51
-
52
- if asset_id is None:
53
- logger.error(
54
- f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment"
55
- )
56
- return
57
-
58
- try:
59
- logger.info(
60
- f"COMET INFO: Downloading checkpoint {checkpoint_filename}"
61
- )
62
- asset_filename = checkpoint_filename
63
-
64
- model_binary = experiment.get_asset(
65
- asset_id, return_type="binary", stream=False
66
- )
67
- model_download_path = f"{model_dir}/{asset_filename}"
68
- with open(model_download_path, "wb") as f:
69
- f.write(model_binary)
70
-
71
- opt.weights = model_download_path
72
-
73
- except Exception as e:
74
- logger.warning(
75
- "COMET WARNING: Unable to download checkpoint from Comet"
76
- )
77
- logger.exception(e)
78
-
79
-
80
- def set_opt_parameters(opt, experiment):
81
- """Update the opts Namespace with parameters
82
- from Comet's ExistingExperiment when resuming a run
83
-
84
- Args:
85
- opt (argparse.Namespace): Namespace of command line options
86
- experiment (comet_ml.APIExperiment): Comet API Experiment object
87
- """
88
- asset_list = experiment.get_asset_list()
89
- resume_string = opt.resume
90
-
91
- for asset in asset_list:
92
- if asset["fileName"] == "opt.yaml":
93
- asset_id = asset["assetId"]
94
- asset_binary = experiment.get_asset(
95
- asset_id, return_type="binary", stream=False
96
- )
97
- opt_dict = yaml.safe_load(asset_binary)
98
- for key, value in opt_dict.items():
99
- setattr(opt, key, value)
100
- opt.resume = resume_string
101
-
102
- # Save hyperparameters to YAML file
103
- # Necessary to pass checks in training script
104
- save_dir = f"{opt.project}/{experiment.name}"
105
- os.makedirs(save_dir, exist_ok=True)
106
-
107
- hyp_yaml_path = f"{save_dir}/hyp.yaml"
108
- with open(hyp_yaml_path, "w") as f:
109
- yaml.dump(opt.hyp, f)
110
- opt.hyp = hyp_yaml_path
111
-
112
-
113
- def check_comet_weights(opt):
114
- """Downloads model weights from Comet and updates the
115
- weights path to point to saved weights location
116
-
117
- Args:
118
- opt (argparse.Namespace): Command Line arguments passed
119
- to YOLOv5 training script
120
-
121
- Returns:
122
- None/bool: Return True if weights are successfully downloaded
123
- else return None
124
- """
125
- if comet_ml is None:
126
- return
127
-
128
- if isinstance(opt.weights, str):
129
- if opt.weights.startswith(COMET_PREFIX):
130
- api = comet_ml.API()
131
- resource = urlparse(opt.weights)
132
- experiment_path = f"{resource.netloc}{resource.path}"
133
- experiment = api.get(experiment_path)
134
- download_model_checkpoint(opt, experiment)
135
- return True
136
-
137
- return None
138
-
139
-
140
- def check_comet_resume(opt):
141
- """Restores run parameters to its original state based on the model checkpoint
142
- and logged Experiment parameters.
143
-
144
- Args:
145
- opt (argparse.Namespace): Command Line arguments passed
146
- to YOLOv5 training script
147
-
148
- Returns:
149
- None/bool: Return True if the run is restored successfully
150
- else return None
151
- """
152
- if comet_ml is None:
153
- return
154
-
155
- if isinstance(opt.resume, str):
156
- if opt.resume.startswith(COMET_PREFIX):
157
- api = comet_ml.API()
158
- resource = urlparse(opt.resume)
159
- experiment_path = f"{resource.netloc}{resource.path}"
160
- experiment = api.get(experiment_path)
161
- set_opt_parameters(opt, experiment)
162
- download_model_checkpoint(opt, experiment)
163
-
164
- return True
165
-
166
- return None
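A hedged sketch of the download path, assuming `COMET_API_KEY` is set in the environment and using a hypothetical `comet://` experiment URI (the workspace, project, and experiment names below are placeholders):

```python
from types import SimpleNamespace

from comet_utils import check_comet_weights  # the module above

opt = SimpleNamespace(
    weights="comet://my-workspace/my-project/abc123?best.pt",  # hypothetical path
    project="runs/train",
)
if check_comet_weights(opt):
    # opt.weights now points at the locally downloaded checkpoint
    print("resumed from", opt.weights)
```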
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/[id]/summarize/$types.d.ts DELETED
@@ -1,9 +0,0 @@
1
- import type * as Kit from '@sveltejs/kit';
2
-
3
- type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
4
- type RouteParams = { id: string }
5
- type RouteId = '/conversation/[id]/summarize';
6
-
7
- export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
8
- export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
9
- export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Myshell.py DELETED
@@ -1,173 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json, uuid, hashlib, time, random
4
-
5
- from aiohttp import ClientSession
6
- from aiohttp.http import WSMsgType
7
- import asyncio
8
-
9
- from ..typing import AsyncGenerator
10
- from .base_provider import AsyncGeneratorProvider, format_prompt
11
-
12
-
13
- models = {
14
- "samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
15
- "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
16
- "gpt-4": "01c8de4fbfc548df903712b0922a4e01",
17
- }
18
-
19
-
20
- class Myshell(AsyncGeneratorProvider):
21
- url = "https://app.myshell.ai/chat"
22
- working = True
23
- supports_gpt_35_turbo = True
24
- supports_gpt_4 = True
25
-
26
- @classmethod
27
- async def create_async_generator(
28
- cls,
29
- model: str,
30
- messages: list[dict[str, str]],
31
- timeout: int = 90,
32
- **kwargs
33
- ) -> AsyncGenerator:
34
- if not model:
35
- bot_id = models["samantha"]
36
- elif model in models:
37
- bot_id = models[model]
38
- else:
39
- raise ValueError(f"Model is not supported: {model}")
40
-
41
- user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
42
- visitor_id = generate_visitor_id(user_agent)
43
-
44
- async with ClientSession(
45
- headers={'User-Agent': user_agent}
46
- ) as session:
47
- async with session.ws_connect(
48
- "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
49
- autoping=False,
50
- timeout=timeout
51
- ) as wss:
52
- # Send and receive hello message
53
- await wss.receive_str()
54
- message = json.dumps({"token": None, "visitorId": visitor_id})
55
- await wss.send_str(f"40/chat,{message}")
56
- await wss.receive_str()
57
-
58
- # Fix "need_verify_captcha" issue
59
- await asyncio.sleep(5)
60
-
61
- # Create chat message
62
- text = format_prompt(messages)
63
- chat_data = json.dumps(["text_chat",{
64
- "reqId": str(uuid.uuid4()),
65
- "botUid": bot_id,
66
- "sourceFrom": "myshellWebsite",
67
- "text": text,
68
- **generate_signature(text)
69
- }])
70
-
71
- # Send chat message
72
- chat_start = "42/chat,"
73
- chat_message = f"{chat_start}{chat_data}"
74
- await wss.send_str(chat_message)
75
-
76
- # Receive messages
77
- async for message in wss:
78
- if message.type != WSMsgType.TEXT:
79
- continue
80
- # Ping back
81
- if message.data == "2":
82
- await wss.send_str("3")
83
- continue
84
- # Is not chat message
85
- if not message.data.startswith(chat_start):
86
- continue
87
- data_type, data = json.loads(message.data[len(chat_start):])
88
- if data_type == "text_stream":
89
- if data["data"]["text"]:
90
- yield data["data"]["text"]
91
- elif data["data"]["isFinal"]:
92
- break
93
- elif data_type in ("message_replied", "need_verify_captcha"):
94
- raise RuntimeError(f"Received unexpected message: {data_type}")
95
-
96
-
97
- @classmethod
98
- @property
99
- def params(cls):
100
- params = [
101
- ("model", "str"),
102
- ("messages", "list[dict[str, str]]"),
103
- ("stream", "bool"),
104
- ]
105
- param = ", ".join([": ".join(p) for p in params])
106
- return f"g4f.provider.{cls.__name__} supports: ({param})"
107
-
108
-
109
- def generate_timestamp() -> str:
110
- return str(
111
- int(
112
- str(int(time.time() * 1000))[:-1]
113
- + str(
114
- sum(
115
- 2 * int(digit)
116
- if idx % 2 == 0
117
- else 3 * int(digit)
118
- for idx, digit in enumerate(str(int(time.time() * 1000))[:-1])
119
- )
120
- % 10
121
- )
122
- )
123
- )
124
-
125
- def generate_signature(text: str):
126
- timestamp = generate_timestamp()
127
- version = 'v1.0.0'
128
- secret = '8@VXGK3kKHr!u2gA'
129
- data = f"{version}#{text}#{timestamp}#{secret}"
130
- signature = hashlib.md5(data.encode()).hexdigest()
131
- signature = signature[::-1]
132
- return {
133
- "signature": signature,
134
- "timestamp": timestamp,
135
- "version": version
136
- }
137
-
138
- def xor_hash(B: str):
139
- r = []
140
- i = 0
141
-
142
- def o(e, t):
143
- o_val = 0
144
- for i in range(len(t)):
145
- o_val |= r[i] << (8 * i)
146
- return e ^ o_val
147
-
148
- for e in range(len(B)):
149
- t = ord(B[e])
150
- r.insert(0, 255 & t)
151
-
152
- if len(r) >= 4:
153
- i = o(i, r)
154
- r = []
155
-
156
- if len(r) > 0:
157
- i = o(i, r)
158
-
159
- return hex(i)[2:]
160
-
161
- def performance() -> str:
162
- t = int(time.time() * 1000)
163
- e = 0
164
- while t == int(time.time() * 1000):
165
- e += 1
166
- return hex(t)[2:] + hex(e)[2:]
167
-
168
- def generate_visitor_id(user_agent: str) -> str:
169
- f = performance()
170
- r = hex(int(random.random() * (16**16)))[2:-2]
171
- d = xor_hash(user_agent)
172
- e = hex(1080 * 1920)[2:]
173
- return f"{f}-{r}-{d}-{e}-{f}"
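The pure signing helpers can be sanity-checked offline; a small sketch, assuming `generate_signature` and `generate_visitor_id` above (with their imports) are copied into a scratch module, since the full file uses package-relative imports:

```python
# Outputs vary per call because a timestamp is embedded in both helpers.
sig = generate_signature("hello")
print(sig["version"])         # v1.0.0
print(len(sig["signature"]))  # 32: a reversed md5 hex digest

ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"
print(generate_visitor_id(ua))  # "<perf>-<rand>-<ua hash>-1fa400-<perf>"
```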
spaces/Adithedev/Text-Summarization-Tool/app.py DELETED
@@ -1,81 +0,0 @@
1
- import streamlit as st
2
- import base64
3
- import re
4
- import spacy
5
- from heapq import nlargest
6
-
7
- st.title("Text Summarizer")
8
- with st.form(key = "clf_form"):
9
- text_input = st.text_area("Type Here: ")
10
- input_slider = st.slider(step=0.1,min_value=0.2,max_value=0.7,label="How much portion of the text do you wish to be summarized, Eg: 0.2 --> 20% of the Original Text")
11
- submit_btn = st.form_submit_button(label = "Submit")
12
- countOfWords = len(text_input.split())
13
-
14
- class Model():
15
- try:
16
- nlp = spacy.load("en_core_web_sm")
17
- except OSError:
18
- import subprocess
19
- subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
20
- def predict(text):
21
- stop_words = [ 'stop', 'the', 'to', 'and', 'a', 'in', 'it', 'is', 'I', 'that', 'had', 'on', 'for', 'were', 'was']
22
- nlp = spacy.load("en_core_web_sm")
23
- doc = nlp(text)
24
-
25
- lemmatized_text = " ".join([token.lemma_ for token in doc])
26
-
27
- re_text = re.sub("[^\s\w,.]"," ",lemmatized_text)
28
- re_text = re.sub("[ ]{2,}"," ",re_text).lower()
29
-
30
- word_frequencies = {}
31
- for word in doc:
32
- if word.text not in "\n":
33
- if word.text not in stop_words:
34
- if word.text not in word_frequencies.keys():
35
- word_frequencies[word.text] = 1
36
- else:
37
- word_frequencies[word.text] +=1
38
-
39
- max_word_frequency = max(word_frequencies.values(), default=1)  # default=1 guards against division by zero on all-stop-word input
40
-
41
- for word in word_frequencies.keys():
42
- word_frequencies[word] = word_frequencies[word] / max_word_frequency
43
-
44
- sent_tokens = [sent for sent in doc.sents]
45
- sent_scores = {}
46
-
47
- for sent in sent_tokens:
48
- for word in sent:
49
- if word.text in word_frequencies.keys():
50
- if sent not in sent_scores.keys():
51
- sent_scores[sent] = word_frequencies[word.text]
52
- else:
53
- sent_scores[sent] += word_frequencies[word.text]
54
-
55
- sentence_length = int(len(sent_tokens)*input_slider)
56
- summary = nlargest(sentence_length,sent_scores,sent_scores.get)
57
- final_summary = [word.text for word in summary]
58
- final_summary = " ".join(final_summary)
59
- return final_summary
60
-
61
-
62
- if submit_btn:
63
- if text_input == "":
64
- st.error("Enter something in order to summarize it.",icon="⛔️")
65
- else:
66
- if countOfWords <=80:
67
- st.warning("Please enter more than 80 words in order to summarize it.",icon="⚠️")
68
- else:
69
- st.subheader("Output: ")
70
-
71
- col1,col2 = st.columns(2)
72
-
73
- output = Model.predict(text=text_input)
74
-
75
- with col1:
76
- st.info("Original Text: ")
77
- st.write(text_input)
78
-
79
- with col2:
80
- st.info("Summarized Text: ")
81
- st.write(output)
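`Model.predict` above reaches into Streamlit globals (`input_slider`), which makes it hard to reuse. The same extractive idea isolated into a plain function, as a hypothetical sketch (not part of the app):

```python
import spacy
from heapq import nlargest

def summarize(text: str, ratio: float = 0.3) -> str:
    """Frequency-based extractive summary, the same idea as Model.predict."""
    nlp = spacy.load("en_core_web_sm")  # assumes the model is installed
    doc = nlp(text)

    # Term frequencies over content words, normalized by the max frequency
    freqs: dict[str, int] = {}
    for tok in doc:
        if tok.is_alpha and not tok.is_stop:
            freqs[tok.lower_] = freqs.get(tok.lower_, 0) + 1
    top = max(freqs.values(), default=1)

    # Score each sentence by the normalized frequencies of its words
    scores = {
        sent: sum(freqs.get(tok.lower_, 0) / top for tok in sent)
        for sent in doc.sents
    }
    n = max(1, int(len(scores) * ratio))
    return " ".join(sent.text for sent in nlargest(n, scores, key=scores.get))
```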
spaces/Aditya9790/yolo7-object-tracking/sort.py DELETED
@@ -1,367 +0,0 @@
1
- from __future__ import print_function
2
-
3
- import os
4
- import numpy as np
5
-
6
- # Use a non-interactive matplotlib backend so plotting also works on headless machines
- import matplotlib
- matplotlib.use('Agg')
12
- import matplotlib.pyplot as plt
13
- import matplotlib.patches as patches
14
- from skimage import io
15
- from random import randint
16
- import glob
17
- import time
18
- import argparse
19
- from filterpy.kalman import KalmanFilter
20
-
21
-
22
- def get_color():
23
- # r = randint(0, 255)
24
- # g = randint(0, 255)
25
- # b = randint(0, 255)
26
- color = (randint(0, 255), randint(0, 255), randint(0, 255))
27
- return color
28
- def linear_assignment(cost_matrix):
29
- try:
30
- import lap #linear assignment problem solver
31
- _, x, y = lap.lapjv(cost_matrix, extend_cost = True)
32
- return np.array([[y[i],i] for i in x if i>=0])
33
- except ImportError:
34
- from scipy.optimize import linear_sum_assignment
35
- x,y = linear_sum_assignment(cost_matrix)
36
- return np.array(list(zip(x,y)))
37
-
38
-
39
- """From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]"""
40
- def iou_batch(bb_test, bb_gt):
41
-
42
- bb_gt = np.expand_dims(bb_gt, 0)
43
- bb_test = np.expand_dims(bb_test, 1)
44
-
45
- xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0])
46
- yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
47
- xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
48
- yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
49
- w = np.maximum(0., xx2 - xx1)
50
- h = np.maximum(0., yy2 - yy1)
51
- wh = w * h
52
- o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
53
- + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
54
- return(o)
55
-
56
-
57
- """Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio"""
58
- def convert_bbox_to_z(bbox):
59
- w = bbox[2] - bbox[0]
60
- h = bbox[3] - bbox[1]
61
- x = bbox[0] + w/2.
62
- y = bbox[1] + h/2.
63
- s = w * h
64
- #scale is just area
65
- r = w / float(h)
66
- return np.array([x, y, s, r]).reshape((4, 1))
67
-
68
-
69
- """Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
70
- [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right"""
71
- def convert_x_to_bbox(x, score=None):
72
- w = np.sqrt(x[2] * x[3])
73
- h = x[2] / w
74
- if score is None:
75
- return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
76
- else:
77
- return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
78
-
79
- """This class represents the internal state of individual tracked objects observed as bbox."""
80
- class KalmanBoxTracker(object):
81
-
82
- count = 0
83
- def __init__(self, bbox):
84
- """
85
- Initialize a tracker using initial bounding box
86
-
87
- Parameter 'bbox' must have 'detected class' int number at the -1 position.
88
- """
89
- self.kf = KalmanFilter(dim_x=7, dim_z=4)
90
- self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
91
- self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
92
-
93
- self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes')
94
- self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
95
- self.kf.P *= 10.
96
- self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things)
97
- self.kf.Q[4:,4:] *= 0.5
98
-
99
- self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR
100
- self.time_since_update = 0
101
- self.id = KalmanBoxTracker.count
102
- KalmanBoxTracker.count += 1
103
- self.history = []
104
- self.hits = 0
105
- self.hit_streak = 0
106
- self.age = 0
107
- self.centroidarr = []
108
- CX = (bbox[0]+bbox[2])//2
109
- CY = (bbox[1]+bbox[3])//2
110
- self.centroidarr.append((CX,CY))
111
-
112
-
113
- #keep yolov5 detected class information
114
- self.detclass = bbox[5]
115
-
116
- def update(self, bbox):
117
- """
118
- Updates the state vector with observed bbox
119
- """
120
- self.time_since_update = 0
121
- self.history = []
122
- self.hits += 1
123
- self.hit_streak += 1
124
- self.kf.update(convert_bbox_to_z(bbox))
125
- self.detclass = bbox[5]
126
- CX = (bbox[0]+bbox[2])//2
127
- CY = (bbox[1]+bbox[3])//2
128
- self.centroidarr.append((CX,CY))
129
-
130
- def predict(self):
131
- """
132
- Advances the state vector and returns the predicted bounding box estimate
133
- """
134
- if((self.kf.x[6]+self.kf.x[2])<=0):
135
- self.kf.x[6] *= 0.0
136
- self.kf.predict()
137
- self.age += 1
138
- if(self.time_since_update>0):
139
- self.hit_streak = 0
140
- self.time_since_update += 1
141
- self.history.append(convert_x_to_bbox(self.kf.x))
142
- # bbox=self.history[-1]
143
- # CX = (bbox[0]+bbox[2])/2
144
- # CY = (bbox[1]+bbox[3])/2
145
- # self.centroidarr.append((CX,CY))
146
-
147
- return self.history[-1]
148
-
149
-
150
- def get_state(self):
151
- """
152
- Returns the current bounding box estimate
153
- # test
154
- arr1 = np.array([[1,2,3,4]])
155
- arr2 = np.array([0])
156
- arr3 = np.expand_dims(arr2, 0)
157
- np.concatenate((arr1,arr3), axis=1)
158
- """
159
- arr_detclass = np.expand_dims(np.array([self.detclass]), 0)
160
-
161
- arr_u_dot = np.expand_dims(self.kf.x[4],0)
162
- arr_v_dot = np.expand_dims(self.kf.x[5],0)
163
- arr_s_dot = np.expand_dims(self.kf.x[6],0)
164
-
165
- return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1)
166
-
167
- def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3):
168
- """
169
- Assigns detections to tracked object (both represented as bounding boxes)
170
- Returns 3 lists of
171
- 1. matches,
172
- 2. unmatched_detections
173
- 3. unmatched_trackers
174
- """
175
- if(len(trackers)==0):
176
- return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
177
-
178
- iou_matrix = iou_batch(detections, trackers)
179
-
180
- if min(iou_matrix.shape) > 0:
181
- a = (iou_matrix > iou_threshold).astype(np.int32)
182
- if a.sum(1).max() == 1 and a.sum(0).max() ==1:
183
- matched_indices = np.stack(np.where(a), axis=1)
184
- else:
185
- matched_indices = linear_assignment(-iou_matrix)
186
- else:
187
- matched_indices = np.empty(shape=(0,2))
188
-
189
- unmatched_detections = []
190
- for d, det in enumerate(detections):
191
- if(d not in matched_indices[:,0]):
192
- unmatched_detections.append(d)
193
-
194
-
195
- unmatched_trackers = []
196
- for t, trk in enumerate(trackers):
197
- if(t not in matched_indices[:,1]):
198
- unmatched_trackers.append(t)
199
-
200
- #filter out matched with low IOU
201
- matches = []
202
- for m in matched_indices:
203
- if(iou_matrix[m[0], m[1]]<iou_threshold):
204
- unmatched_detections.append(m[0])
205
- unmatched_trackers.append(m[1])
206
- else:
207
- matches.append(m.reshape(1,2))
208
-
209
- if(len(matches)==0):
210
- matches = np.empty((0,2), dtype=int)
211
- else:
212
- matches = np.concatenate(matches, axis=0)
213
-
214
- return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
215
-
216
-
217
- class Sort(object):
218
- def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
219
- """
220
- Parameters for SORT
221
- """
222
- self.max_age = max_age
223
- self.min_hits = min_hits
224
- self.iou_threshold = iou_threshold
225
- self.trackers = []
226
- self.frame_count = 0
227
- self.color_list = []
228
-
229
-
230
-
231
- def getTrackers(self,):
232
- return self.trackers
233
-
234
- def update(self, dets= np.empty((0,6)), unique_color = False):
235
- """
236
- Parameters:
237
- 'dets' - a numpy array of detection in the format [[x1, y1, x2, y2, score], [x1,y1,x2,y2,score],...]
238
-
239
- Ensure to call this method even frame has no detections. (pass np.empty((0,5)))
240
-
241
- Returns a similar array, where the last column is object ID (replacing confidence score)
242
-
243
- NOTE: The number of objects returned may differ from the number of objects provided.
244
- """
245
- self.frame_count += 1
246
-
247
- # Get predicted locations from existing trackers
248
- trks = np.zeros((len(self.trackers), 6))
249
- to_del = []
250
- ret = []
251
- for t, trk in enumerate(trks):
252
-
253
- pos = self.trackers[t].predict()[0]
254
- trk[:] = [pos[0], pos[1], pos[2], pos[3], 0, 0]
255
- if np.any(np.isnan(pos)):
256
- to_del.append(t)
257
- trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
258
- for t in reversed(to_del):
259
- self.trackers.pop(t)
260
- if unique_color:
261
- self.color_list.pop(t)
262
- matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
263
-
264
- # Update matched trackers with assigned detections
265
- for m in matched:
266
- self.trackers[m[1]].update(dets[m[0], :])
267
-
268
- # Create and initialize new trackers for unmatched detections
269
- for i in unmatched_dets:
270
- trk = KalmanBoxTracker(np.hstack((dets[i,:], np.array([0]))))
271
- self.trackers.append(trk)
272
- if unique_color:
273
- self.color_list.append(get_color())
274
-
275
-
276
- i = len(self.trackers)
277
- for trk in reversed(self.trackers):
278
- d = trk.get_state()[0]
279
- if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
280
- ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) #+1'd because MOT benchmark requires positive value
281
- i -= 1
282
- #remove dead tracklet
283
- if(trk.time_since_update >self.max_age):
284
- self.trackers.pop(i)
285
- if unique_color:
286
- self.color_list.pop(i)
287
-
288
- if(len(ret) > 0):
289
- return np.concatenate(ret)
290
- return np.empty((0,6))
291
-
292
- def parse_args():
293
- """Parse input arguments."""
294
- parser = argparse.ArgumentParser(description='SORT demo')
295
- parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true')
296
- parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
297
- parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
298
- parser.add_argument("--max_age",
299
- help="Maximum number of frames to keep alive a track without associated detections.",
300
- type=int, default=1)
301
- parser.add_argument("--min_hits",
302
- help="Minimum number of associated detections before track is initialised.",
303
- type=int, default=3)
304
- parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
305
- args = parser.parse_args()
306
- return args
307
-
308
- if __name__ == '__main__':
309
- # all train
310
- args = parse_args()
311
- display = args.display
312
- phase = args.phase
313
- total_time = 0.0
314
- total_frames = 0
315
- colours = np.random.rand(32, 3) #used only for display
316
- if(display):
317
- if not os.path.exists('mot_benchmark'):
318
- print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
319
- exit()
320
- plt.ion()
321
- fig = plt.figure()
322
- ax1 = fig.add_subplot(111, aspect='equal')
323
-
324
- if not os.path.exists('output'):
325
- os.makedirs('output')
326
- pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
327
- for seq_dets_fn in glob.glob(pattern):
328
- mot_tracker = Sort(max_age=args.max_age,
329
- min_hits=args.min_hits,
330
- iou_threshold=args.iou_threshold) #create instance of the SORT tracker
331
- seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
332
- seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
333
-
334
- with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file:
335
- print("Processing %s."%(seq))
336
- for frame in range(int(seq_dets[:,0].max())):
337
- frame += 1 #detection and frame numbers begin at 1
338
- dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
339
- dets[:, 2:4] += dets[:, 0:2] #convert [x1,y1,w,h] to [x1,y1,x2,y2]
340
- total_frames += 1
341
-
342
- if(display):
343
- fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame))
344
- im =io.imread(fn)
345
- ax1.imshow(im)
346
- plt.title(seq + ' Tracked Targets')
347
-
348
- start_time = time.time()
349
- trackers = mot_tracker.update(dets)
350
- cycle_time = time.time() - start_time
351
- total_time += cycle_time
352
-
353
- for d in trackers:
354
- print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
355
- if(display):
356
- d = d.astype(np.int32)
357
- ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
358
-
359
- if(display):
360
- fig.canvas.flush_events()
361
- plt.draw()
362
- ax1.cla()
363
-
364
- print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
365
-
366
- if(display):
367
- print("Note: to get real runtime results run without the option: --display")
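A minimal sketch of driving the tracker, assuming the file above is saved as `sort.py` and its dependencies (filterpy, scikit-image, etc.) are installed. Detections carry a class id in the sixth column because `KalmanBoxTracker` reads `bbox[5]`:

```python
import numpy as np

from sort import Sort  # the module above; argparse code is guarded by __main__

tracker = Sort(max_age=5, min_hits=1, iou_threshold=0.3)

# Per frame: one row per detection, [x1, y1, x2, y2, score, class]
dets = np.array([
    [10.0, 10.0, 50.0, 50.0, 0.9, 0.0],
    [60.0, 60.0, 120.0, 140.0, 0.8, 2.0],
])

for _ in range(3):  # feed the same boxes for a few frames
    tracks = tracker.update(dets)

# Each row: [x1, y1, x2, y2, class, u_dot, v_dot, s_dot, track_id]
print(tracks)
```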
spaces/Ameaou/academic-chatgpt3.1/README.md DELETED
@@ -1,300 +0,0 @@
1
- ---
2
- title: academic-chatgpt
3
- emoji: 😻
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.25.0
8
- python_version: 3.11
9
- app_file: main.py
10
- pinned: false
11
- duplicated_from: qingxu98/academic-chatgpt-beta
12
- ---
13
-
14
- # ChatGPT Academic Optimization
-
- **If you like this project, please give it a Star; if you have invented more useful shortcuts or function plugins, feel free to open an issue or a pull request**
-
- If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
-
- > **Note**
- >
- > 1. Please note that only function plugins (buttons) marked in **red** support reading files, and some plugins live in the **drop-down menu** of the plugin area. In addition, we welcome and handle PRs for any new plugin with **top priority**!
- >
- > 2. The function of every file in this project is documented in detail in the self-analysis report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can also click the relevant function plugin at any time to have GPT regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
- >
-
-
- <div align="center">
-
- Feature | Description
- --- | ---
- One-click polishing | Supports one-click polishing and one-click grammar checking of papers
- One-click Chinese-English translation | One-click translation between Chinese and English
- One-click code explanation | Displays and explains code correctly
- [Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports user-defined shortcut keys
- [Proxy server configuration](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring a proxy server
- Modular design | Supports custom high-order function plugins and [function plugins]; plugins support [hot reloading](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
- [Self program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click digest](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's own source code
- [Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
- Paper reading | [Function plugin] One-click interpretation of a full latex paper, with abstract generation
- Latex full-text translation and polishing | [Function plugin] One-click translation or polishing of a latex paper
- Batch comment generation | [Function plugin] One-click batch generation of function comments
- Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
- [Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv article url to translate the abstract and download the PDF in one click
- [Full-text PDF translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title & abstract of a PDF paper and translates the full text (multi-threaded)
- [Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let gpt pick the interesting articles for you
- Formula/image/table display | Shows formulas in both their tex form and rendered form; supports formula and code highlighting
- Multi-threaded function plugin support | Supports calling chatgpt on multiple threads; one-click processing of huge amounts of text or code
- Dark gradio [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) on startup | Append ```/?__dark-theme=true``` to the browser url to switch to the dark theme
- [Multi-LLM](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | Being served by GPT3.5, GPT4 and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time must feel great, right?
- Huggingface [online experience](https://huggingface.co/spaces/qingxu98/gpt-academic) without a VPN | Log in to huggingface, then duplicate [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
- …… | ……
-
- </div>
-
-
- - New interface (change the LAYOUT option in config.py to switch between a "left-right layout" and a "top-bottom layout")
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
- </div>
-
-
- - All buttons are generated dynamically by reading functional.py, so custom features can be added freely, liberating the clipboard
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
- </div>
-
- - Polishing/proofreading
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
- </div>
-
- - If the output contains formulas, they are shown in both tex form and rendered form for easy copying and reading
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
- </div>
-
- - Too lazy to read the project code? Just feed the whole project straight to chatgpt
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
- </div>
-
- - Mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
- </div>
-
- Mixed multi-LLM calls in the [huggingface beta](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm)
-
-
- ---
-
- ## Installation - Method 1: Run directly (Windows, Linux or MacOS)
-
- 1. Download the project
- ```sh
- git clone https://github.com/binary-husky/chatgpt_academic.git
- cd chatgpt_academic
- ```
-
- 2. Configure the API_KEY and proxy settings
-
- In `config.py`, configure the overseas proxy and your OpenAI API KEY, as explained below
- ```
- 1. If you are in China, you need an overseas proxy to use the OpenAI API smoothly. Read config.py carefully for how to set it up (1. set USE_PROXY to True; 2. modify the proxies as instructed).
- 2. Configure your OpenAI API KEY. You need to register on the OpenAI website and obtain an API KEY. Once you have the API KEY, set it in config.py and you are done.
- 3. Issues related to proxy networks (network timeouts, proxies not working) are collected at https://github.com/binary-husky/chatgpt_academic/issues/1
- ```
- (P.S. At runtime the program first checks whether a private config file named `config_private.py` exists, and uses it to override same-named options in `config.py`. So if you understand this loading logic, we strongly recommend creating a new config file named `config_private.py` next to `config.py` and moving (copying) the options from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer.)
-
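The `config_private.py` override just described is easy to sketch; a hypothetical loader (the real project implements its own version of this logic):

```python
# Hypothetical sketch of the config_private.py override pattern described above.
import importlib

config = importlib.import_module("config")
try:
    private = importlib.import_module("config_private")
except ModuleNotFoundError:
    private = None  # no private overrides present

if private is not None:
    for name in dir(private):
        if not name.startswith("_"):
            setattr(config, name, getattr(private, name))  # private values win
```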
-
- 3. Install dependencies
- ```sh
- # (Option I) Recommended
- python -m pip install -r requirements.txt
-
- # (Option II) If you use anaconda, the steps are similar:
- # (Option II.1) conda create -n gptac_venv python=3.11
- # (Option II.2) conda activate gptac_venv
- # (Option II.3) python -m pip install -r requirements.txt
-
- # Note: use the official pip source or the Aliyun pip source; other pip mirrors (e.g. some universities') may cause problems. To switch sources temporarily:
- # python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
- ```
-
- To support Tsinghua ChatGLM, additional dependencies need to be installed (if you are unfamiliar with python or your machine is not powerful enough, we suggest not trying this):
- ```sh
- python -m pip install -r request_llm/requirements_chatglm.txt
- ```
-
- 4. Run
- ```sh
- python main.py
- ```
-
- 5. Test the function plugins
- ```
- - Test the Python project analysis
-     enter `./crazy_functions/test_project/python/dqn` in the input area, then click "Analyze the whole Python project"
- - Test the self code digest
-     click "[Multi-threaded demo] Analyze this project itself (source code self-translation)"
- - Test the experimental feature template function (asks gpt what happened in history on this day); you can use this function as a template to implement more complex features
-     click "[Function plugin template demo] On this day in history"
- - More features are available from the drop-down menu in the function plugin area
- ```
-
- ## Installation - Method 2: Use docker (Linux)
-
- 1. ChatGPT only (recommended for most people)
- ``` sh
- # Download the project
- git clone https://github.com/binary-husky/chatgpt_academic.git
- cd chatgpt_academic
- # Configure the overseas proxy and the OpenAI API KEY
- Edit config.py with any text editor
- # Install
- docker build -t gpt-academic .
- # Run
- docker run --rm -it --net=host gpt-academic
-
- # Test the function plugins
- ## Test the function plugin template function (asks gpt what happened in history on this day); you can use it as a template to implement more complex features
- click "[Function plugin template demo] On this day in history"
- ## Test writing an abstract for a Latex project
- enter ./crazy_functions/test_project/latex/attention in the input area, then click "Read the Tex paper and write an abstract"
- ## Test the Python project analysis
- enter ./crazy_functions/test_project/python/dqn in the input area, then click "Analyze the whole Python project"
-
- More features are available from the drop-down menu in the function plugin area
- ```
-
- 2. ChatGPT + ChatGLM (requires being very familiar with docker + a powerful enough machine)
-
- ``` sh
- # Modify the dockerfile
- cd docs && nano Dockerfile+ChatGLM
- # How to build (Dockerfile+ChatGLM is under the docs path; cd docs first)
- docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
- # How to run (1) run directly:
- docker run --rm -it --net=host --gpus=all gpt-academic
- # How to run (2) enter the container and make adjustments first:
- docker run --rm -it --net=host --gpus=all gpt-academic bash
- ```
-
-
- ## Installation - Method 3: Other deployment options
-
- 1. Deployment on a remote cloud server
- See the [deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
- 2. Using WSL2 (Windows Subsystem for Linux)
- See the [deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-
- ## Installation - Proxy configuration
- 1. The usual way
- [Configure the proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)
-
- 2. A tutorial for complete beginners
- [Beginner tutorial](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
-
-
- ---
-
- ## Defining new convenience buttons (custom academic shortcuts)
- Open `core_functional.py` with any text editor, add an entry as below, then restart the program. (If the button has already been added successfully and is visible, then both the prefix and the suffix support hot-editing and take effect without restarting the program.)
- For example
- ```
- "Super English-to-Chinese": {
-     # Prefix, prepended to your input. E.g., used to describe your request, such as translating, explaining code, polishing, etc.
-     "Prefix": "Please translate the following passage into Chinese, and then explain each technical term that appears in it with a markdown table:\n\n",
-
-     # Suffix, appended to your input. E.g., together with the prefix it can wrap your input in quotation marks.
-     "Suffix": "",
- },
- ```
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
- </div>
-
- ---
-
-
- ## Feature showcase
-
- ### Image display:
-
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
- </div>
-
-
- ### If a program can understand and analyze itself:
-
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
- </div>
-
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
- </div>
-
- ### Analysis of any other Python/Cpp project:
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
- </div>
-
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
- </div>
-
- ### One-click reading comprehension and abstract generation for Latex papers
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
- </div>
-
- ### Automatic report generation
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
- <img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
- <img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
- </div>
-
- ### Modular feature design
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
- <img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
- </div>
-
-
- ### Source code translation into English
-
- <div align="center">
- <img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
- </div>
-
- ## Todo and version planning:
- - version 3.2+ (todo): function plugins support richer parameter interfaces
- - version 3.1: support querying multiple gpt models at the same time! support api2d, support load balancing across multiple apikeys
- - version 3.0: support for chatglm and other small llms
- - version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
- - version 2.5: self-updating; fixed the problem of overly long text and token overflow when summarizing the source code of large projects
- - version 2.4: (1) added full-text PDF translation; (2) added the ability to switch the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- - version 2.3: enhanced multi-threaded interactivity
- - version 2.2: function plugins support hot reloading
- - version 2.1: collapsible layout
- - version 2.0: introduced modular function plugins
- - version 1.0: basic features
-
- ## References and learning
-
- ```
- The code borrows designs from many other excellent projects, mainly including:
-
- # Reference project 1: many tricks borrowed from ChuanhuChatGPT
- https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
- # Reference project 2: Tsinghua ChatGLM-6B:
- https://github.com/THUDM/ChatGLM-6B
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py DELETED
@@ -1,882 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import numpy as np
20
- import PIL
21
- import torch
22
- from packaging import version
23
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
24
-
25
- from diffusers.utils import is_accelerate_available, is_accelerate_version
26
-
27
- from ...configuration_utils import FrozenDict
28
- from ...image_processor import VaeImageProcessor
29
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
30
- from ...models import AutoencoderKL, UNet2DConditionModel
31
- from ...schedulers import DDIMScheduler
32
- from ...utils import PIL_INTERPOLATION, deprecate, logging, randn_tensor
33
- from ..pipeline_utils import DiffusionPipeline
34
- from . import StableDiffusionPipelineOutput
35
- from .safety_checker import StableDiffusionSafetyChecker
36
-
37
-
38
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
-
40
-
41
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
42
- def preprocess(image):
43
- warnings.warn(
44
- "The preprocess method is deprecated and will be removed in a future version. Please"
45
- " use VaeImageProcessor.preprocess instead",
46
- FutureWarning,
47
- )
48
- if isinstance(image, torch.Tensor):
49
- return image
50
- elif isinstance(image, PIL.Image.Image):
51
- image = [image]
52
-
53
- if isinstance(image[0], PIL.Image.Image):
54
- w, h = image[0].size
55
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
56
-
57
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
58
- image = np.concatenate(image, axis=0)
59
- image = np.array(image).astype(np.float32) / 255.0
60
- image = image.transpose(0, 3, 1, 2)
61
- image = 2.0 * image - 1.0
62
- image = torch.from_numpy(image)
63
- elif isinstance(image[0], torch.Tensor):
64
- image = torch.cat(image, dim=0)
65
- return image
66
-
67
-
68
- def posterior_sample(scheduler, latents, timestep, clean_latents, generator, eta):
69
- # 1. get previous step value (=t-1)
70
- prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps
71
-
72
- if prev_timestep <= 0:
73
- return clean_latents
74
-
75
- # 2. compute alphas, betas
76
- alpha_prod_t = scheduler.alphas_cumprod[timestep]
77
- alpha_prod_t_prev = (
78
- scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
79
- )
80
-
81
- variance = scheduler._get_variance(timestep, prev_timestep)
82
- std_dev_t = eta * variance ** (0.5)
83
-
84
- # direction pointing to x_t
85
- e_t = (latents - alpha_prod_t ** (0.5) * clean_latents) / (1 - alpha_prod_t) ** (0.5)
86
- dir_xt = (1.0 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * e_t
87
- noise = std_dev_t * randn_tensor(
88
- clean_latents.shape, dtype=clean_latents.dtype, device=clean_latents.device, generator=generator
89
- )
90
- prev_latents = alpha_prod_t_prev ** (0.5) * clean_latents + dir_xt + noise
91
-
92
- return prev_latents
93
-
94
-
95
- def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta):
96
- # 1. get previous step value (=t-1)
97
- prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps
98
-
99
- # 2. compute alphas, betas
100
- alpha_prod_t = scheduler.alphas_cumprod[timestep]
101
- alpha_prod_t_prev = (
102
- scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
103
- )
104
-
105
- beta_prod_t = 1 - alpha_prod_t
106
-
107
- # 3. compute predicted original sample from predicted noise also called
108
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
109
- pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
110
-
111
- # 4. Clip "predicted x_0"
112
- if scheduler.config.clip_sample:
113
- pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
114
-
115
- # 5. compute variance: "sigma_t(η)" -> see formula (16)
116
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
117
- variance = scheduler._get_variance(timestep, prev_timestep)
118
- std_dev_t = eta * variance ** (0.5)
119
-
120
- # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
121
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred
122
-
123
- noise = (prev_latents - (alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction)) / (
124
- variance ** (0.5) * eta
125
- )
126
- return noise
127
-
128
-
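For reference, both helpers above manipulate the stochastic DDIM update (formula (12) of https://arxiv.org/abs/2010.02502, reconstructed here from the code and its comments), where $\hat{x}_0$ is the predicted clean sample and $\sigma_t = \eta \sqrt{(1-\alpha_{t-1})/(1-\alpha_t)}\,\sqrt{1-\alpha_t/\alpha_{t-1}}$:

$$x_{t-1} \;=\; \sqrt{\alpha_{t-1}}\,\hat{x}_0 \;+\; \sqrt{1-\alpha_{t-1}-\sigma_t^2}\;\epsilon_\theta(x_t, t) \;+\; \sigma_t z, \qquad z \sim \mathcal{N}(0, I).$$

`posterior_sample` draws $x_{t-1}$ from this distribution using the known clean latents, and `compute_noise` rearranges the same equation to recover $z = \big(x_{t-1} - \sqrt{\alpha_{t-1}}\,\hat{x}_0 - \sqrt{1-\alpha_{t-1}-\sigma_t^2}\,\epsilon_\theta\big)/\sigma_t$ given a sampled $x_{t-1}$.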
129
- class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
130
- r"""
131
- Pipeline for text-guided image to image generation using Stable Diffusion.
132
-
133
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
134
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
135
-
136
- Args:
137
- vae ([`AutoencoderKL`]):
138
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
139
- text_encoder ([`~transformers.CLIPTextModel`]):
140
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
141
- tokenizer ([`~transformers.CLIPTokenizer`]):
142
- A `CLIPTokenizer` to tokenize text.
143
- unet ([`UNet2DConditionModel`]):
144
- A `UNet2DConditionModel` to denoise the encoded image latents.
145
- scheduler ([`SchedulerMixin`]):
146
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can only be an
147
- instance of [`DDIMScheduler`].
148
- safety_checker ([`StableDiffusionSafetyChecker`]):
149
- Classification module that estimates whether generated images could be considered offensive or harmful.
150
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
151
- about a model's potential harms.
152
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
153
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
154
- """
155
- _optional_components = ["safety_checker", "feature_extractor"]
156
-
157
- def __init__(
158
- self,
159
- vae: AutoencoderKL,
160
- text_encoder: CLIPTextModel,
161
- tokenizer: CLIPTokenizer,
162
- unet: UNet2DConditionModel,
163
- scheduler: DDIMScheduler,
164
- safety_checker: StableDiffusionSafetyChecker,
165
- feature_extractor: CLIPImageProcessor,
166
- requires_safety_checker: bool = True,
167
- ):
168
- super().__init__()
169
-
170
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
171
- deprecation_message = (
172
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
173
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
174
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
175
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
176
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
177
- " file"
178
- )
179
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
180
- new_config = dict(scheduler.config)
181
- new_config["steps_offset"] = 1
182
- scheduler._internal_dict = FrozenDict(new_config)
183
-
184
- if safety_checker is None and requires_safety_checker:
185
- logger.warning(
186
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
187
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
188
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
189
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
190
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
191
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
192
- )
193
-
194
- if safety_checker is not None and feature_extractor is None:
195
- raise ValueError(
196
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
197
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
198
- )
199
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
200
- version.parse(unet.config._diffusers_version).base_version
201
- ) < version.parse("0.9.0.dev0")
202
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
203
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
204
- deprecation_message = (
205
- "The configuration file of the unet has set the default `sample_size` to smaller than"
206
- " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
207
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
208
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
209
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
210
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
211
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
212
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
213
- " the `unet/config.json` file"
214
- )
215
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
216
- new_config = dict(unet.config)
217
- new_config["sample_size"] = 64
218
- unet._internal_dict = FrozenDict(new_config)
219
-
220
- self.register_modules(
221
- vae=vae,
222
- text_encoder=text_encoder,
223
- tokenizer=tokenizer,
224
- unet=unet,
225
- scheduler=scheduler,
226
- safety_checker=safety_checker,
227
- feature_extractor=feature_extractor,
228
- )
229
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
230
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
231
- self.register_to_config(requires_safety_checker=requires_safety_checker)
232
-
233
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
234
- def enable_model_cpu_offload(self, gpu_id=0):
235
- r"""
236
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
237
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
238
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
239
- iterative execution of the `unet`.
240
- """
241
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
242
- from accelerate import cpu_offload_with_hook
243
- else:
244
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
245
-
246
- device = torch.device(f"cuda:{gpu_id}")
247
-
248
- if self.device.type != "cpu":
249
- self.to("cpu", silence_dtype_warnings=True)
250
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
251
-
252
- hook = None
253
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
254
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
255
-
256
- if self.safety_checker is not None:
257
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
258
-
259
- # We'll offload the last model manually.
260
- self.final_offload_hook = hook
261
-
262
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
263
- def _encode_prompt(
264
- self,
265
- prompt,
266
- device,
267
- num_images_per_prompt,
268
- do_classifier_free_guidance,
269
- negative_prompt=None,
270
- prompt_embeds: Optional[torch.FloatTensor] = None,
271
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
272
- lora_scale: Optional[float] = None,
273
- ):
274
- r"""
275
- Encodes the prompt into text encoder hidden states.
276
-
277
- Args:
278
- prompt (`str` or `List[str]`, *optional*):
279
- prompt to be encoded
280
- device: (`torch.device`):
281
- torch device
282
- num_images_per_prompt (`int`):
283
- number of images that should be generated per prompt
284
- do_classifier_free_guidance (`bool`):
285
- whether to use classifier free guidance or not
286
- negative_prompt (`str` or `List[str]`, *optional*):
287
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
288
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
289
- less than `1`).
290
- prompt_embeds (`torch.FloatTensor`, *optional*):
291
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
292
- provided, text embeddings will be generated from `prompt` input argument.
293
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
294
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
295
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
296
- argument.
297
- lora_scale (`float`, *optional*):
298
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
299
- """
300
- # set lora scale so that monkey patched LoRA
301
- # function of text encoder can correctly access it
302
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
303
- self._lora_scale = lora_scale
304
-
305
- if prompt is not None and isinstance(prompt, str):
306
- batch_size = 1
307
- elif prompt is not None and isinstance(prompt, list):
308
- batch_size = len(prompt)
309
- else:
310
- batch_size = prompt_embeds.shape[0]
311
-
312
- if prompt_embeds is None:
313
- # textual inversion: process multi-vector tokens if necessary
314
- if isinstance(self, TextualInversionLoaderMixin):
315
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
316
-
317
- text_inputs = self.tokenizer(
318
- prompt,
319
- padding="max_length",
320
- max_length=self.tokenizer.model_max_length,
321
- truncation=True,
322
- return_tensors="pt",
323
- )
324
- text_input_ids = text_inputs.input_ids
325
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
326
-
327
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
328
- text_input_ids, untruncated_ids
329
- ):
330
- removed_text = self.tokenizer.batch_decode(
331
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
332
- )
333
- logger.warning(
334
- "The following part of your input was truncated because CLIP can only handle sequences up to"
335
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
336
- )
337
-
338
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
339
- attention_mask = text_inputs.attention_mask.to(device)
340
- else:
341
- attention_mask = None
342
-
343
- prompt_embeds = self.text_encoder(
344
- text_input_ids.to(device),
345
- attention_mask=attention_mask,
346
- )
347
- prompt_embeds = prompt_embeds[0]
348
-
349
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
350
-
351
- bs_embed, seq_len, _ = prompt_embeds.shape
352
- # duplicate text embeddings for each generation per prompt, using mps friendly method
353
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
354
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
355
-
356
- # get unconditional embeddings for classifier free guidance
357
- if do_classifier_free_guidance and negative_prompt_embeds is None:
358
- uncond_tokens: List[str]
359
- if negative_prompt is None:
360
- uncond_tokens = [""] * batch_size
361
- elif prompt is not None and type(prompt) is not type(negative_prompt):
362
- raise TypeError(
363
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
364
- f" {type(prompt)}."
365
- )
366
- elif isinstance(negative_prompt, str):
367
- uncond_tokens = [negative_prompt]
368
- elif batch_size != len(negative_prompt):
369
- raise ValueError(
370
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
371
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
372
- " the batch size of `prompt`."
373
- )
374
- else:
375
- uncond_tokens = negative_prompt
376
-
377
- # textual inversion: process multi-vector tokens if necessary
378
- if isinstance(self, TextualInversionLoaderMixin):
379
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
380
-
381
- max_length = prompt_embeds.shape[1]
382
- uncond_input = self.tokenizer(
383
- uncond_tokens,
384
- padding="max_length",
385
- max_length=max_length,
386
- truncation=True,
387
- return_tensors="pt",
388
- )
389
-
390
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
391
- attention_mask = uncond_input.attention_mask.to(device)
392
- else:
393
- attention_mask = None
394
-
395
- negative_prompt_embeds = self.text_encoder(
396
- uncond_input.input_ids.to(device),
397
- attention_mask=attention_mask,
398
- )
399
- negative_prompt_embeds = negative_prompt_embeds[0]
400
-
401
- if do_classifier_free_guidance:
402
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
403
- seq_len = negative_prompt_embeds.shape[1]
404
-
405
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
406
-
407
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
408
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
409
-
410
- # For classifier free guidance, we need to do two forward passes.
411
- # Here we concatenate the unconditional and text embeddings into a single batch
412
- # to avoid doing two forward passes
413
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
414
-
415
- return prompt_embeds
416
-
417
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs
418
- def check_inputs(
419
- self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
420
- ):
421
- if strength < 0 or strength > 1:
422
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
423
-
424
- if (callback_steps is None) or (
425
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
426
- ):
427
- raise ValueError(
428
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
429
- f" {type(callback_steps)}."
430
- )
431
-
432
- if prompt is not None and prompt_embeds is not None:
433
- raise ValueError(
434
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
435
- " only forward one of the two."
436
- )
437
- elif prompt is None and prompt_embeds is None:
438
- raise ValueError(
439
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
440
- )
441
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
442
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
443
-
444
- if negative_prompt is not None and negative_prompt_embeds is not None:
445
- raise ValueError(
446
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
447
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
448
- )
449
-
450
- if prompt_embeds is not None and negative_prompt_embeds is not None:
451
- if prompt_embeds.shape != negative_prompt_embeds.shape:
452
- raise ValueError(
453
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
454
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
455
- f" {negative_prompt_embeds.shape}."
456
- )
457
-
458
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
459
- def prepare_extra_step_kwargs(self, generator, eta):
460
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
461
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
462
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
463
- # and should be between [0, 1]
464
-
465
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
466
- extra_step_kwargs = {}
467
- if accepts_eta:
468
- extra_step_kwargs["eta"] = eta
469
-
470
- # check if the scheduler accepts generator
471
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
472
- if accepts_generator:
473
- extra_step_kwargs["generator"] = generator
474
- return extra_step_kwargs
475
-
476
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
477
- def run_safety_checker(self, image, device, dtype):
478
- if self.safety_checker is None:
479
- has_nsfw_concept = None
480
- else:
481
- if torch.is_tensor(image):
482
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
483
- else:
484
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
485
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
486
- image, has_nsfw_concept = self.safety_checker(
487
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
488
- )
489
- return image, has_nsfw_concept
490
-
491
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
492
- def decode_latents(self, latents):
493
- warnings.warn(
494
- "The decode_latents method is deprecated and will be removed in a future version. Please"
495
- " use VaeImageProcessor instead",
496
- FutureWarning,
497
- )
498
- latents = 1 / self.vae.config.scaling_factor * latents
499
- image = self.vae.decode(latents, return_dict=False)[0]
500
- image = (image / 2 + 0.5).clamp(0, 1)
501
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
502
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
503
- return image
504
-
505
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
506
- def get_timesteps(self, num_inference_steps, strength, device):
507
- # get the original timestep using init_timestep
508
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
509
-
510
- t_start = max(num_inference_steps - init_timestep, 0)
511
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
512
-
513
- return timesteps, num_inference_steps - t_start
514
-
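As a worked example of the step arithmetic above (assuming `scheduler.order == 1`, as for DDIM; other schedulers may differ):

```python
# get_timesteps arithmetic with num_inference_steps=50, strength=0.8:
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# timesteps = scheduler.timesteps[t_start:]  -> only the last 40 of the
# 50 scheduled timesteps are actually run; strength=1.0 would run all 50.
print(init_timestep, t_start)  # 40 10
```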
515
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
516
- image = image.to(device=device, dtype=dtype)
517
-
518
- batch_size = image.shape[0]
519
-
520
- if image.shape[1] == 4:
521
- init_latents = image
522
-
523
- else:
524
- if isinstance(generator, list) and len(generator) != batch_size:
525
- raise ValueError(
526
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
527
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
528
- )
529
-
530
- if isinstance(generator, list):
531
- init_latents = [
532
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
533
- ]
534
- init_latents = torch.cat(init_latents, dim=0)
535
- else:
536
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
537
-
538
- init_latents = self.vae.config.scaling_factor * init_latents
539
-
540
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
541
- # expand init_latents for batch_size
542
- deprecation_message = (
543
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
544
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
545
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
546
- " your script to pass as many initial images as text prompts to suppress this warning."
547
- )
548
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
549
- additional_image_per_prompt = batch_size // init_latents.shape[0]
550
- init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0)
551
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
552
- raise ValueError(
553
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
554
- )
555
- else:
556
- init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
557
-
558
- # add noise to latents using the timestep
559
- shape = init_latents.shape
560
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
561
-
562
- # get latents
563
- clean_latents = init_latents
564
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
565
- latents = init_latents
566
-
567
- return latents, clean_latents
568
-
569
- @torch.no_grad()
570
- def __call__(
571
- self,
572
- prompt: Union[str, List[str]],
573
- source_prompt: Union[str, List[str]],
574
- image: Union[
575
- torch.FloatTensor,
576
- PIL.Image.Image,
577
- np.ndarray,
578
- List[torch.FloatTensor],
579
- List[PIL.Image.Image],
580
- List[np.ndarray],
581
- ] = None,
582
- strength: float = 0.8,
583
- num_inference_steps: Optional[int] = 50,
584
- guidance_scale: Optional[float] = 7.5,
585
- source_guidance_scale: Optional[float] = 1,
586
- num_images_per_prompt: Optional[int] = 1,
587
- eta: Optional[float] = 0.1,
588
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
589
- prompt_embeds: Optional[torch.FloatTensor] = None,
590
- output_type: Optional[str] = "pil",
591
- return_dict: bool = True,
592
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
593
- callback_steps: int = 1,
594
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
595
- ):
596
- r"""
597
- The call function to the pipeline for generation.
598
-
599
- Args:
600
- prompt (`str` or `List[str]`):
601
- The prompt or prompts to guide the image generation.
602
- image (`torch.FloatTensor`, `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
603
- `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
604
- latents as `image`, but if passing latents directly it is not encoded again.
605
- strength (`float`, *optional*, defaults to 0.8):
606
- Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
607
- starting point and more noise is added the higher the `strength`. The number of denoising steps depends
608
- on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
609
- process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
610
- essentially ignores `image`.
611
- num_inference_steps (`int`, *optional*, defaults to 50):
612
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
613
- expense of slower inference. This parameter is modulated by `strength`.
614
- guidance_scale (`float`, *optional*, defaults to 7.5):
615
- A higher guidance scale value encourages the model to generate images closely linked to the text
616
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
617
- source_guidance_scale (`float`, *optional*, defaults to 1):
618
- Guidance scale for the source prompt. This is useful to control the amount of influence the source
619
- prompt has for encoding.
620
- num_images_per_prompt (`int`, *optional*, defaults to 1):
621
- The number of images to generate per prompt.
622
- eta (`float`, *optional*, defaults to 0.1):
623
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
624
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
625
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
626
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
627
- generation deterministic.
628
- prompt_embeds (`torch.FloatTensor`, *optional*):
629
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
630
- provided, text embeddings are generated from the `prompt` input argument.
631
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
632
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
633
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
634
- output_type (`str`, *optional*, defaults to `"pil"`):
635
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
636
- return_dict (`bool`, *optional*, defaults to `True`):
637
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
638
- plain tuple.
639
- callback (`Callable`, *optional*):
640
- A function that calls every `callback_steps` steps during inference. The function is called with the
641
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
642
- callback_steps (`int`, *optional*, defaults to 1):
643
- The frequency at which the `callback` function is called. If not specified, the callback is called at
644
- every step.
645
- cross_attention_kwargs (`dict`, *optional*):
646
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
647
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
648
-
649
- Example:
650
-
651
- ```py
652
- import requests
653
- import torch
654
- from PIL import Image
655
- from io import BytesIO
656
-
657
- from diffusers import CycleDiffusionPipeline, DDIMScheduler
658
-
659
- # load the pipeline
660
- # make sure you're logged in with `huggingface-cli login`
661
- model_id_or_path = "CompVis/stable-diffusion-v1-4"
662
- scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
663
- pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
664
-
665
- # let's download an initial image
666
- url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png"
667
- response = requests.get(url)
668
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
669
- init_image = init_image.resize((512, 512))
670
- init_image.save("horse.png")
671
-
672
- # let's specify a prompt
673
- source_prompt = "An astronaut riding a horse"
674
- prompt = "An astronaut riding an elephant"
675
-
676
- # call the pipeline
677
- image = pipe(
678
- prompt=prompt,
679
- source_prompt=source_prompt,
680
- image=init_image,
681
- num_inference_steps=100,
682
- eta=0.1,
683
- strength=0.8,
684
- guidance_scale=2,
685
- source_guidance_scale=1,
686
- ).images[0]
687
-
688
- image.save("horse_to_elephant.png")
689
-
690
- # let's try another example
691
- # See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion
692
- url = (
693
- "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png"
694
- )
695
- response = requests.get(url)
696
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
697
- init_image = init_image.resize((512, 512))
698
- init_image.save("black.png")
699
-
700
- source_prompt = "A black colored car"
701
- prompt = "A blue colored car"
702
-
703
- # call the pipeline
704
- torch.manual_seed(0)
705
- image = pipe(
706
- prompt=prompt,
707
- source_prompt=source_prompt,
708
- image=init_image,
709
- num_inference_steps=100,
710
- eta=0.1,
711
- strength=0.85,
712
- guidance_scale=3,
713
- source_guidance_scale=1,
714
- ).images[0]
715
-
716
- image.save("black_to_blue.png")
717
- ```
718
-
719
- Returns:
720
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
721
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
722
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
723
- second element is a list of `bool`s indicating whether the corresponding generated image contains
724
- "not-safe-for-work" (nsfw) content.
725
- """
726
- # 1. Check inputs
727
- self.check_inputs(prompt, strength, callback_steps)
728
-
729
- # 2. Define call parameters
730
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
731
- device = self._execution_device
732
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
733
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
734
- # corresponds to doing no classifier free guidance.
735
- do_classifier_free_guidance = guidance_scale > 1.0
736
-
737
- # 3. Encode input prompt
738
- text_encoder_lora_scale = (
739
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
740
- )
741
- prompt_embeds = self._encode_prompt(
742
- prompt,
743
- device,
744
- num_images_per_prompt,
745
- do_classifier_free_guidance,
746
- prompt_embeds=prompt_embeds,
747
- lora_scale=text_encoder_lora_scale,
748
- )
749
- source_prompt_embeds = self._encode_prompt(
750
- source_prompt, device, num_images_per_prompt, do_classifier_free_guidance, None
751
- )
752
-
753
- # 4. Preprocess image
754
- image = self.image_processor.preprocess(image)
755
-
756
- # 5. Prepare timesteps
757
- self.scheduler.set_timesteps(num_inference_steps, device=device)
758
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
759
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
760
-
761
- # 6. Prepare latent variables
762
- latents, clean_latents = self.prepare_latents(
763
- image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
764
- )
765
- source_latents = latents
766
-
767
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
768
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
769
- generator = extra_step_kwargs.pop("generator", None)
770
-
771
- # 8. Denoising loop
772
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
773
- with self.progress_bar(total=num_inference_steps) as progress_bar:
774
- for i, t in enumerate(timesteps):
775
- # expand the latents if we are doing classifier free guidance
776
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
777
- source_latent_model_input = (
778
- torch.cat([source_latents] * 2) if do_classifier_free_guidance else source_latents
779
- )
780
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
781
- source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t)
782
-
783
- # predict the noise residual
784
- if do_classifier_free_guidance:
785
- concat_latent_model_input = torch.stack(
786
- [
787
- source_latent_model_input[0],
788
- latent_model_input[0],
789
- source_latent_model_input[1],
790
- latent_model_input[1],
791
- ],
792
- dim=0,
793
- )
794
- concat_prompt_embeds = torch.stack(
795
- [
796
- source_prompt_embeds[0],
797
- prompt_embeds[0],
798
- source_prompt_embeds[1],
799
- prompt_embeds[1],
800
- ],
801
- dim=0,
802
- )
803
- else:
804
- concat_latent_model_input = torch.cat(
805
- [
806
- source_latent_model_input,
807
- latent_model_input,
808
- ],
809
- dim=0,
810
- )
811
- concat_prompt_embeds = torch.cat(
812
- [
813
- source_prompt_embeds,
814
- prompt_embeds,
815
- ],
816
- dim=0,
817
- )
818
-
819
- concat_noise_pred = self.unet(
820
- concat_latent_model_input,
821
- t,
822
- cross_attention_kwargs=cross_attention_kwargs,
823
- encoder_hidden_states=concat_prompt_embeds,
824
- ).sample
825
-
826
- # perform guidance
827
- if do_classifier_free_guidance:
828
- (
829
- source_noise_pred_uncond,
830
- noise_pred_uncond,
831
- source_noise_pred_text,
832
- noise_pred_text,
833
- ) = concat_noise_pred.chunk(4, dim=0)
834
-
835
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
836
- source_noise_pred = source_noise_pred_uncond + source_guidance_scale * (
837
- source_noise_pred_text - source_noise_pred_uncond
838
- )
839
-
840
- else:
841
- (source_noise_pred, noise_pred) = concat_noise_pred.chunk(2, dim=0)
842
-
843
- # Sample source_latents from the posterior distribution.
844
- prev_source_latents = posterior_sample(
845
- self.scheduler, source_latents, t, clean_latents, generator=generator, **extra_step_kwargs
846
- )
847
- # Compute noise.
848
- noise = compute_noise(
849
- self.scheduler, prev_source_latents, source_latents, t, source_noise_pred, **extra_step_kwargs
850
- )
851
- source_latents = prev_source_latents
852
-
853
- # compute the previous noisy sample x_t -> x_t-1
854
- latents = self.scheduler.step(
855
- noise_pred, t, latents, variance_noise=noise, **extra_step_kwargs
856
- ).prev_sample
857
-
858
- # call the callback, if provided
859
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
860
- progress_bar.update()
861
- if callback is not None and i % callback_steps == 0:
862
- callback(i, t, latents)
863
-
864
- # 9. Post-processing
865
- if not output_type == "latent":
866
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
867
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
868
- else:
869
- image = latents
870
- has_nsfw_concept = None
871
-
872
- if has_nsfw_concept is None:
873
- do_denormalize = [True] * image.shape[0]
874
- else:
875
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
876
-
877
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
878
-
879
- if not return_dict:
880
- return (image, has_nsfw_concept)
881
-
882
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_base/run.sh DELETED
@@ -1,10 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- work_path=$(dirname $0)
4
- PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
5
- python -m torch.distributed.launch --nproc_per_node=8 \
6
- tools/train.py ${work_path}/config.py \
7
- --launcher pytorch \
8
- --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_base_in1k.pth' \
9
- --work-dir ${work_path}/ckpt \
10
- 2>&1 | tee -a ${work_path}/log.txt

spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnet.py DELETED
@@ -1,663 +0,0 @@
1
- import torch.nn as nn
2
- import torch.utils.checkpoint as cp
3
- from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
4
- constant_init, kaiming_init)
5
- from mmcv.runner import load_checkpoint
6
- from torch.nn.modules.batchnorm import _BatchNorm
7
-
8
- from mmdet.utils import get_root_logger
9
- from ..builder import BACKBONES
10
- from ..utils import ResLayer
11
-
12
-
13
- class BasicBlock(nn.Module):
14
- expansion = 1
15
-
16
- def __init__(self,
17
- inplanes,
18
- planes,
19
- stride=1,
20
- dilation=1,
21
- downsample=None,
22
- style='pytorch',
23
- with_cp=False,
24
- conv_cfg=None,
25
- norm_cfg=dict(type='BN'),
26
- dcn=None,
27
- plugins=None):
28
- super(BasicBlock, self).__init__()
29
- assert dcn is None, 'Not implemented yet.'
30
- assert plugins is None, 'Not implemented yet.'
31
-
32
- self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
33
- self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
34
-
35
- self.conv1 = build_conv_layer(
36
- conv_cfg,
37
- inplanes,
38
- planes,
39
- 3,
40
- stride=stride,
41
- padding=dilation,
42
- dilation=dilation,
43
- bias=False)
44
- self.add_module(self.norm1_name, norm1)
45
- self.conv2 = build_conv_layer(
46
- conv_cfg, planes, planes, 3, padding=1, bias=False)
47
- self.add_module(self.norm2_name, norm2)
48
-
49
- self.relu = nn.ReLU(inplace=True)
50
- self.downsample = downsample
51
- self.stride = stride
52
- self.dilation = dilation
53
- self.with_cp = with_cp
54
-
55
- @property
56
- def norm1(self):
57
- """nn.Module: normalization layer after the first convolution layer"""
58
- return getattr(self, self.norm1_name)
59
-
60
- @property
61
- def norm2(self):
62
- """nn.Module: normalization layer after the second convolution layer"""
63
- return getattr(self, self.norm2_name)
64
-
65
- def forward(self, x):
66
- """Forward function."""
67
-
68
- def _inner_forward(x):
69
- identity = x
70
-
71
- out = self.conv1(x)
72
- out = self.norm1(out)
73
- out = self.relu(out)
74
-
75
- out = self.conv2(out)
76
- out = self.norm2(out)
77
-
78
- if self.downsample is not None:
79
- identity = self.downsample(x)
80
-
81
- out += identity
82
-
83
- return out
84
-
85
- if self.with_cp and x.requires_grad:
86
- out = cp.checkpoint(_inner_forward, x)
87
- else:
88
- out = _inner_forward(x)
89
-
90
- out = self.relu(out)
91
-
92
- return out
93
-
94
-
95
- class Bottleneck(nn.Module):
96
- expansion = 4
97
-
98
- def __init__(self,
99
- inplanes,
100
- planes,
101
- stride=1,
102
- dilation=1,
103
- downsample=None,
104
- style='pytorch',
105
- with_cp=False,
106
- conv_cfg=None,
107
- norm_cfg=dict(type='BN'),
108
- dcn=None,
109
- plugins=None):
110
- """Bottleneck block for ResNet.
111
-
112
- If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
113
- it is "caffe", the stride-two layer is the first 1x1 conv layer.
114
- """
115
- super(Bottleneck, self).__init__()
116
- assert style in ['pytorch', 'caffe']
117
- assert dcn is None or isinstance(dcn, dict)
118
- assert plugins is None or isinstance(plugins, list)
119
- if plugins is not None:
120
- allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
121
- assert all(p['position'] in allowed_position for p in plugins)
122
-
123
- self.inplanes = inplanes
124
- self.planes = planes
125
- self.stride = stride
126
- self.dilation = dilation
127
- self.style = style
128
- self.with_cp = with_cp
129
- self.conv_cfg = conv_cfg
130
- self.norm_cfg = norm_cfg
131
- self.dcn = dcn
132
- self.with_dcn = dcn is not None
133
- self.plugins = plugins
134
- self.with_plugins = plugins is not None
135
-
136
- if self.with_plugins:
137
- # collect plugins for conv1/conv2/conv3
138
- self.after_conv1_plugins = [
139
- plugin['cfg'] for plugin in plugins
140
- if plugin['position'] == 'after_conv1'
141
- ]
142
- self.after_conv2_plugins = [
143
- plugin['cfg'] for plugin in plugins
144
- if plugin['position'] == 'after_conv2'
145
- ]
146
- self.after_conv3_plugins = [
147
- plugin['cfg'] for plugin in plugins
148
- if plugin['position'] == 'after_conv3'
149
- ]
150
-
151
- if self.style == 'pytorch':
152
- self.conv1_stride = 1
153
- self.conv2_stride = stride
154
- else:
155
- self.conv1_stride = stride
156
- self.conv2_stride = 1
157
-
158
- self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
159
- self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
160
- self.norm3_name, norm3 = build_norm_layer(
161
- norm_cfg, planes * self.expansion, postfix=3)
162
-
163
- self.conv1 = build_conv_layer(
164
- conv_cfg,
165
- inplanes,
166
- planes,
167
- kernel_size=1,
168
- stride=self.conv1_stride,
169
- bias=False)
170
- self.add_module(self.norm1_name, norm1)
171
- fallback_on_stride = False
172
- if self.with_dcn:
173
- fallback_on_stride = dcn.pop('fallback_on_stride', False)
174
- if not self.with_dcn or fallback_on_stride:
175
- self.conv2 = build_conv_layer(
176
- conv_cfg,
177
- planes,
178
- planes,
179
- kernel_size=3,
180
- stride=self.conv2_stride,
181
- padding=dilation,
182
- dilation=dilation,
183
- bias=False)
184
- else:
185
- assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
186
- self.conv2 = build_conv_layer(
187
- dcn,
188
- planes,
189
- planes,
190
- kernel_size=3,
191
- stride=self.conv2_stride,
192
- padding=dilation,
193
- dilation=dilation,
194
- bias=False)
195
-
196
- self.add_module(self.norm2_name, norm2)
197
- self.conv3 = build_conv_layer(
198
- conv_cfg,
199
- planes,
200
- planes * self.expansion,
201
- kernel_size=1,
202
- bias=False)
203
- self.add_module(self.norm3_name, norm3)
204
-
205
- self.relu = nn.ReLU(inplace=True)
206
- self.downsample = downsample
207
-
208
- if self.with_plugins:
209
- self.after_conv1_plugin_names = self.make_block_plugins(
210
- planes, self.after_conv1_plugins)
211
- self.after_conv2_plugin_names = self.make_block_plugins(
212
- planes, self.after_conv2_plugins)
213
- self.after_conv3_plugin_names = self.make_block_plugins(
214
- planes * self.expansion, self.after_conv3_plugins)
215
-
216
- def make_block_plugins(self, in_channels, plugins):
217
- """make plugins for block.
218
-
219
- Args:
220
- in_channels (int): Input channels of plugin.
221
- plugins (list[dict]): List of plugins cfg to build.
222
-
223
- Returns:
224
- list[str]: List of the names of plugin.
225
- """
226
- assert isinstance(plugins, list)
227
- plugin_names = []
228
- for plugin in plugins:
229
- plugin = plugin.copy()
230
- name, layer = build_plugin_layer(
231
- plugin,
232
- in_channels=in_channels,
233
- postfix=plugin.pop('postfix', ''))
234
- assert not hasattr(self, name), f'duplicate plugin {name}'
235
- self.add_module(name, layer)
236
- plugin_names.append(name)
237
- return plugin_names
238
-
239
- def forward_plugin(self, x, plugin_names):
240
- out = x
241
- for name in plugin_names:
242
- out = getattr(self, name)(out)  # chain plugins sequentially on the running output
243
- return out
244
-
245
- @property
246
- def norm1(self):
247
- """nn.Module: normalization layer after the first convolution layer"""
248
- return getattr(self, self.norm1_name)
249
-
250
- @property
251
- def norm2(self):
252
- """nn.Module: normalization layer after the second convolution layer"""
253
- return getattr(self, self.norm2_name)
254
-
255
- @property
256
- def norm3(self):
257
- """nn.Module: normalization layer after the third convolution layer"""
258
- return getattr(self, self.norm3_name)
259
-
260
- def forward(self, x):
261
- """Forward function."""
262
-
263
- def _inner_forward(x):
264
- identity = x
265
- out = self.conv1(x)
266
- out = self.norm1(out)
267
- out = self.relu(out)
268
-
269
- if self.with_plugins:
270
- out = self.forward_plugin(out, self.after_conv1_plugin_names)
271
-
272
- out = self.conv2(out)
273
- out = self.norm2(out)
274
- out = self.relu(out)
275
-
276
- if self.with_plugins:
277
- out = self.forward_plugin(out, self.after_conv2_plugin_names)
278
-
279
- out = self.conv3(out)
280
- out = self.norm3(out)
281
-
282
- if self.with_plugins:
283
- out = self.forward_plugin(out, self.after_conv3_plugin_names)
284
-
285
- if self.downsample is not None:
286
- identity = self.downsample(x)
287
-
288
- out += identity
289
-
290
- return out
291
-
292
- if self.with_cp and x.requires_grad:
293
- out = cp.checkpoint(_inner_forward, x)
294
- else:
295
- out = _inner_forward(x)
296
-
297
- out = self.relu(out)
298
-
299
- return out
300
-
301
-
302
- @BACKBONES.register_module()
303
- class ResNet(nn.Module):
304
- """ResNet backbone.
305
-
306
- Args:
307
- depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
308
- stem_channels (int | None): Number of stem channels. If not specified,
309
- it will be the same as `base_channels`. Default: None.
310
- base_channels (int): Number of base channels of res layer. Default: 64.
311
- in_channels (int): Number of input image channels. Default: 3.
312
- num_stages (int): Resnet stages. Default: 4.
313
- strides (Sequence[int]): Strides of the first block of each stage.
314
- dilations (Sequence[int]): Dilation of each stage.
315
- out_indices (Sequence[int]): Output from which stages.
316
- style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
317
- layer is the 3x3 conv layer, otherwise the stride-two layer is
318
- the first 1x1 conv layer.
319
- deep_stem (bool): Replace the 7x7 conv in the input stem with three 3x3 convs.
320
- avg_down (bool): Use AvgPool instead of stride conv when
321
- downsampling in the bottleneck.
322
- frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
323
- -1 means not freezing any parameters.
324
- norm_cfg (dict): Dictionary to construct and config norm layer.
325
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
326
- freeze running stats (mean and var). Note: Effect on Batch Norm
327
- and its variants only.
328
- plugins (list[dict]): List of plugins for stages, each dict contains:
329
-
330
- - cfg (dict, required): Cfg dict to build plugin.
331
- - position (str, required): Position inside block to insert
332
- plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
333
- - stages (tuple[bool], optional): Stages to apply plugin, length
334
- should be same as 'num_stages'.
335
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
336
- memory while slowing down the training speed.
337
- zero_init_residual (bool): Whether to use zero init for last norm layer
338
- in resblocks to let them behave as identity.
339
-
340
- Example:
341
- >>> from mmdet.models import ResNet
342
- >>> import torch
343
- >>> self = ResNet(depth=18)
344
- >>> self.eval()
345
- >>> inputs = torch.rand(1, 3, 32, 32)
346
- >>> level_outputs = self.forward(inputs)
347
- >>> for level_out in level_outputs:
348
- ... print(tuple(level_out.shape))
349
- (1, 64, 8, 8)
350
- (1, 128, 4, 4)
351
- (1, 256, 2, 2)
352
- (1, 512, 1, 1)
353
- """
354
-
355
- arch_settings = {
356
- 18: (BasicBlock, (2, 2, 2, 2)),
357
- 34: (BasicBlock, (3, 4, 6, 3)),
358
- 50: (Bottleneck, (3, 4, 6, 3)),
359
- 101: (Bottleneck, (3, 4, 23, 3)),
360
- 152: (Bottleneck, (3, 8, 36, 3))
361
- }
362
-
363
- def __init__(self,
364
- depth,
365
- in_channels=3,
366
- stem_channels=None,
367
- base_channels=64,
368
- num_stages=4,
369
- strides=(1, 2, 2, 2),
370
- dilations=(1, 1, 1, 1),
371
- out_indices=(0, 1, 2, 3),
372
- style='pytorch',
373
- deep_stem=False,
374
- avg_down=False,
375
- frozen_stages=-1,
376
- conv_cfg=None,
377
- norm_cfg=dict(type='BN', requires_grad=True),
378
- norm_eval=True,
379
- dcn=None,
380
- stage_with_dcn=(False, False, False, False),
381
- plugins=None,
382
- with_cp=False,
383
- zero_init_residual=True):
384
- super(ResNet, self).__init__()
385
- if depth not in self.arch_settings:
386
- raise KeyError(f'invalid depth {depth} for resnet')
387
- self.depth = depth
388
- if stem_channels is None:
389
- stem_channels = base_channels
390
- self.stem_channels = stem_channels
391
- self.base_channels = base_channels
392
- self.num_stages = num_stages
393
- assert num_stages >= 1 and num_stages <= 4
394
- self.strides = strides
395
- self.dilations = dilations
396
- assert len(strides) == len(dilations) == num_stages
397
- self.out_indices = out_indices
398
- assert max(out_indices) < num_stages
399
- self.style = style
400
- self.deep_stem = deep_stem
401
- self.avg_down = avg_down
402
- self.frozen_stages = frozen_stages
403
- self.conv_cfg = conv_cfg
404
- self.norm_cfg = norm_cfg
405
- self.with_cp = with_cp
406
- self.norm_eval = norm_eval
407
- self.dcn = dcn
408
- self.stage_with_dcn = stage_with_dcn
409
- if dcn is not None:
410
- assert len(stage_with_dcn) == num_stages
411
- self.plugins = plugins
412
- self.zero_init_residual = zero_init_residual
413
- self.block, stage_blocks = self.arch_settings[depth]
414
- self.stage_blocks = stage_blocks[:num_stages]
415
- self.inplanes = stem_channels
416
-
417
- self._make_stem_layer(in_channels, stem_channels)
418
-
419
- self.res_layers = []
420
- for i, num_blocks in enumerate(self.stage_blocks):
421
- stride = strides[i]
422
- dilation = dilations[i]
423
- dcn = self.dcn if self.stage_with_dcn[i] else None
424
- if plugins is not None:
425
- stage_plugins = self.make_stage_plugins(plugins, i)
426
- else:
427
- stage_plugins = None
428
- planes = base_channels * 2**i
429
- res_layer = self.make_res_layer(
430
- block=self.block,
431
- inplanes=self.inplanes,
432
- planes=planes,
433
- num_blocks=num_blocks,
434
- stride=stride,
435
- dilation=dilation,
436
- style=self.style,
437
- avg_down=self.avg_down,
438
- with_cp=with_cp,
439
- conv_cfg=conv_cfg,
440
- norm_cfg=norm_cfg,
441
- dcn=dcn,
442
- plugins=stage_plugins)
443
- self.inplanes = planes * self.block.expansion
444
- layer_name = f'layer{i + 1}'
445
- self.add_module(layer_name, res_layer)
446
- self.res_layers.append(layer_name)
447
-
448
- self._freeze_stages()
449
-
450
- self.feat_dim = self.block.expansion * base_channels * 2**(
451
- len(self.stage_blocks) - 1)
452
-
453
- def make_stage_plugins(self, plugins, stage_idx):
454
- """Make plugins for ResNet ``stage_idx`` th stage.
455
-
456
- Currently we support to insert ``context_block``,
457
- ``empirical_attention_block``, ``nonlocal_block`` into the backbone
458
- like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
459
- Bottleneck.
460
-
461
- An example of plugins format could be:
462
-
463
- Examples:
464
- >>> plugins=[
465
- ... dict(cfg=dict(type='xxx', arg1='xxx'),
466
- ... stages=(False, True, True, True),
467
- ... position='after_conv2'),
468
- ... dict(cfg=dict(type='yyy'),
469
- ... stages=(True, True, True, True),
470
- ... position='after_conv3'),
471
- ... dict(cfg=dict(type='zzz', postfix='1'),
472
- ... stages=(True, True, True, True),
473
- ... position='after_conv3'),
474
- ... dict(cfg=dict(type='zzz', postfix='2'),
475
- ... stages=(True, True, True, True),
476
- ... position='after_conv3')
477
- ... ]
478
- >>> self = ResNet(depth=18)
479
- >>> stage_plugins = self.make_stage_plugins(plugins, 0)
480
- >>> assert len(stage_plugins) == 3
481
-
482
- Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
483
-
484
- .. code-block:: none
485
-
486
- conv1-> conv2->conv3->yyy->zzz1->zzz2
487
-
488
- Suppose 'stage_idx=1', the structure of blocks in the stage would be:
489
-
490
- .. code-block:: none
491
-
492
- conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
493
-
494
- If stages is missing, the plugin would be applied to all stages.
495
-
496
- Args:
497
- plugins (list[dict]): List of plugins cfg to build. The postfix is
498
- required if multiple same type plugins are inserted.
499
- stage_idx (int): Index of stage to build
500
-
501
- Returns:
502
- list[dict]: Plugins for current stage
503
- """
504
- stage_plugins = []
505
- for plugin in plugins:
506
- plugin = plugin.copy()
507
- stages = plugin.pop('stages', None)
508
- assert stages is None or len(stages) == self.num_stages
509
- # whether to insert plugin into current stage
510
- if stages is None or stages[stage_idx]:
511
- stage_plugins.append(plugin)
512
-
513
- return stage_plugins
514
-
515
- def make_res_layer(self, **kwargs):
516
- """Pack all blocks in a stage into a ``ResLayer``."""
517
- return ResLayer(**kwargs)
518
-
519
- @property
520
- def norm1(self):
521
- """nn.Module: the normalization layer named "norm1" """
522
- return getattr(self, self.norm1_name)
523
-
524
- def _make_stem_layer(self, in_channels, stem_channels):
525
- if self.deep_stem:
526
- self.stem = nn.Sequential(
527
- build_conv_layer(
528
- self.conv_cfg,
529
- in_channels,
530
- stem_channels // 2,
531
- kernel_size=3,
532
- stride=2,
533
- padding=1,
534
- bias=False),
535
- build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
536
- nn.ReLU(inplace=True),
537
- build_conv_layer(
538
- self.conv_cfg,
539
- stem_channels // 2,
540
- stem_channels // 2,
541
- kernel_size=3,
542
- stride=1,
543
- padding=1,
544
- bias=False),
545
- build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
546
- nn.ReLU(inplace=True),
547
- build_conv_layer(
548
- self.conv_cfg,
549
- stem_channels // 2,
550
- stem_channels,
551
- kernel_size=3,
552
- stride=1,
553
- padding=1,
554
- bias=False),
555
- build_norm_layer(self.norm_cfg, stem_channels)[1],
556
- nn.ReLU(inplace=True))
557
- else:
558
- self.conv1 = build_conv_layer(
559
- self.conv_cfg,
560
- in_channels,
561
- stem_channels,
562
- kernel_size=7,
563
- stride=2,
564
- padding=3,
565
- bias=False)
566
- self.norm1_name, norm1 = build_norm_layer(
567
- self.norm_cfg, stem_channels, postfix=1)
568
- self.add_module(self.norm1_name, norm1)
569
- self.relu = nn.ReLU(inplace=True)
570
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
571
-
572
- def _freeze_stages(self):
573
- if self.frozen_stages >= 0:
574
- if self.deep_stem:
575
- self.stem.eval()
576
- for param in self.stem.parameters():
577
- param.requires_grad = False
578
- else:
579
- self.norm1.eval()
580
- for m in [self.conv1, self.norm1]:
581
- for param in m.parameters():
582
- param.requires_grad = False
583
-
584
- for i in range(1, self.frozen_stages + 1):
585
- m = getattr(self, f'layer{i}')
586
- m.eval()
587
- for param in m.parameters():
588
- param.requires_grad = False
589
-
590
- def init_weights(self, pretrained=None):
591
- """Initialize the weights in backbone.
592
-
593
- Args:
594
- pretrained (str, optional): Path to pre-trained weights.
595
- Defaults to None.
596
- """
597
- if isinstance(pretrained, str):
598
- logger = get_root_logger()
599
- load_checkpoint(self, pretrained, strict=False, logger=logger)
600
- elif pretrained is None:
601
- for m in self.modules():
602
- if isinstance(m, nn.Conv2d):
603
- kaiming_init(m)
604
- elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
605
- constant_init(m, 1)
606
-
607
- if self.dcn is not None:
608
- for m in self.modules():
609
- if isinstance(m, Bottleneck) and hasattr(
610
- m.conv2, 'conv_offset'):
611
- constant_init(m.conv2.conv_offset, 0)
612
-
613
- if self.zero_init_residual:
614
- for m in self.modules():
615
- if isinstance(m, Bottleneck):
616
- constant_init(m.norm3, 0)
617
- elif isinstance(m, BasicBlock):
618
- constant_init(m.norm2, 0)
619
- else:
620
- raise TypeError('pretrained must be a str or None')
621
-
622
- def forward(self, x):
623
- """Forward function."""
624
- if self.deep_stem:
625
- x = self.stem(x)
626
- else:
627
- x = self.conv1(x)
628
- x = self.norm1(x)
629
- x = self.relu(x)
630
- x = self.maxpool(x)
631
- outs = []
632
- for i, layer_name in enumerate(self.res_layers):
633
- res_layer = getattr(self, layer_name)
634
- x = res_layer(x)
635
- if i in self.out_indices:
636
- outs.append(x)
637
- return tuple(outs)
638
-
639
- def train(self, mode=True):
640
- """Convert the model into training mode while keep normalization layer
641
- freezed."""
642
- super(ResNet, self).train(mode)
643
- self._freeze_stages()
644
- if mode and self.norm_eval:
645
- for m in self.modules():
646
- # trick: eval have effect on BatchNorm only
647
- if isinstance(m, _BatchNorm):
648
- m.eval()
649
-
650
-
651
- @BACKBONES.register_module()
652
- class ResNetV1d(ResNet):
653
- r"""ResNetV1d variant described in `Bag of Tricks
654
- <https://arxiv.org/pdf/1812.01187.pdf>`_.
655
-
656
- Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
657
- the input stem with three 3x3 convs. And in the downsampling block, a 2x2
658
- avg_pool with stride 2 is added before conv, whose stride is changed to 1.
659
- """
660
-
661
- def __init__(self, **kwargs):
662
- super(ResNetV1d, self).__init__(
663
- deep_stem=True, avg_down=True, **kwargs)
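
The `ResNetV1d` variant above only flips the `deep_stem` and `avg_down` switches, so it is driven exactly like the base `ResNet`. A minimal sketch, assuming the surrounding `mmdet` package is importable:

```python
import torch
from mmdet.models import ResNetV1d  # registered via @BACKBONES.register_module() above

backbone = ResNetV1d(depth=50)
backbone.init_weights()  # random init here; pass a checkpoint path for pretrained weights
backbone.eval()

feats = backbone(torch.rand(1, 3, 224, 224))
# Four feature maps at strides 4/8/16/32, e.g. (1, 256, 56, 56) ... (1, 2048, 7, 7)
print([tuple(f.shape) for f in feats])
```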
spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/analyze_logs.py DELETED
@@ -1,179 +0,0 @@
- import argparse
- import json
- from collections import defaultdict
-
- import matplotlib.pyplot as plt
- import numpy as np
- import seaborn as sns
-
-
- def cal_train_time(log_dicts, args):
-     for i, log_dict in enumerate(log_dicts):
-         print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
-         all_times = []
-         for epoch in log_dict.keys():
-             if args.include_outliers:
-                 all_times.append(log_dict[epoch]['time'])
-             else:
-                 all_times.append(log_dict[epoch]['time'][1:])
-         all_times = np.array(all_times)
-         epoch_ave_time = all_times.mean(-1)
-         slowest_epoch = epoch_ave_time.argmax()
-         fastest_epoch = epoch_ave_time.argmin()
-         std_over_epoch = epoch_ave_time.std()
-         print(f'slowest epoch {slowest_epoch + 1}, '
-               f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
-         print(f'fastest epoch {fastest_epoch + 1}, '
-               f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
-         print(f'time std over epochs is {std_over_epoch:.4f}')
-         print(f'average iter time: {np.mean(all_times):.4f} s/iter')
-         print()
-
-
- def plot_curve(log_dicts, args):
-     if args.backend is not None:
-         plt.switch_backend(args.backend)
-     sns.set_style(args.style)
-     # if legend is None, use {filename}_{key} as legend
-     legend = args.legend
-     if legend is None:
-         legend = []
-         for json_log in args.json_logs:
-             for metric in args.keys:
-                 legend.append(f'{json_log}_{metric}')
-     assert len(legend) == (len(args.json_logs) * len(args.keys))
-     metrics = args.keys
-
-     num_metrics = len(metrics)
-     for i, log_dict in enumerate(log_dicts):
-         epochs = list(log_dict.keys())
-         for j, metric in enumerate(metrics):
-             print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
-             if metric not in log_dict[epochs[0]]:
-                 raise KeyError(
-                     f'{args.json_logs[i]} does not contain metric {metric}')
-
-             if 'mAP' in metric:
-                 xs = np.arange(1, max(epochs) + 1)
-                 ys = []
-                 for epoch in epochs:
-                     ys += log_dict[epoch][metric]
-                 ax = plt.gca()
-                 ax.set_xticks(xs)
-                 plt.xlabel('epoch')
-                 plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
-             else:
-                 xs = []
-                 ys = []
-                 num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
-                 for epoch in epochs:
-                     iters = log_dict[epoch]['iter']
-                     if log_dict[epoch]['mode'][-1] == 'val':
-                         iters = iters[:-1]
-                     xs.append(
-                         np.array(iters) + (epoch - 1) * num_iters_per_epoch)
-                     ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
-                 xs = np.concatenate(xs)
-                 ys = np.concatenate(ys)
-                 plt.xlabel('iter')
-                 plt.plot(
-                     xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
-             plt.legend()
-         if args.title is not None:
-             plt.title(args.title)
-     if args.out is None:
-         plt.show()
-     else:
-         print(f'save curve to: {args.out}')
-         plt.savefig(args.out)
-         plt.cla()
-
-
- def add_plot_parser(subparsers):
-     parser_plt = subparsers.add_parser(
-         'plot_curve', help='parser for plotting curves')
-     parser_plt.add_argument(
-         'json_logs',
-         type=str,
-         nargs='+',
-         help='path of train log in json format')
-     parser_plt.add_argument(
-         '--keys',
-         type=str,
-         nargs='+',
-         default=['bbox_mAP'],
-         help='the metric that you want to plot')
-     parser_plt.add_argument('--title', type=str, help='title of figure')
-     parser_plt.add_argument(
-         '--legend',
-         type=str,
-         nargs='+',
-         default=None,
-         help='legend of each plot')
-     parser_plt.add_argument(
-         '--backend', type=str, default=None, help='backend of plt')
-     parser_plt.add_argument(
-         '--style', type=str, default='dark', help='style of plt')
-     parser_plt.add_argument('--out', type=str, default=None)
-
-
- def add_time_parser(subparsers):
-     parser_time = subparsers.add_parser(
-         'cal_train_time',
-         help='parser for computing the average time per training iteration')
-     parser_time.add_argument(
-         'json_logs',
-         type=str,
-         nargs='+',
-         help='path of train log in json format')
-     parser_time.add_argument(
-         '--include-outliers',
-         action='store_true',
-         help='include the first value of every epoch when computing '
-         'the average time')
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(description='Analyze Json Log')
-     # currently only support plot curve and calculate average train time
-     subparsers = parser.add_subparsers(dest='task', help='task parser')
-     add_plot_parser(subparsers)
-     add_time_parser(subparsers)
-     args = parser.parse_args()
-     return args
-
-
- def load_json_logs(json_logs):
-     # load and convert json_logs to log_dict, key is epoch, value is a sub dict
-     # keys of sub dict is different metrics, e.g. memory, bbox_mAP
-     # value of sub dict is a list of corresponding values of all iterations
-     log_dicts = [dict() for _ in json_logs]
-     for json_log, log_dict in zip(json_logs, log_dicts):
-         with open(json_log, 'r') as log_file:
-             for line in log_file:
-                 log = json.loads(line.strip())
-                 # skip lines without `epoch` field
-                 if 'epoch' not in log:
-                     continue
-                 epoch = log.pop('epoch')
-                 if epoch not in log_dict:
-                     log_dict[epoch] = defaultdict(list)
-                 for k, v in log.items():
-                     log_dict[epoch][k].append(v)
-     return log_dicts
-
-
- def main():
-     args = parse_args()
-
-     json_logs = args.json_logs
-     for json_log in json_logs:
-         assert json_log.endswith('.json')
-
-     log_dicts = load_json_logs(json_logs)
-
-     eval(args.task)(log_dicts, args)
-
-
- if __name__ == '__main__':
-     main()
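
The script is normally invoked from the command line (for example `python tools/analysis_tools/analyze_logs.py cal_train_time log.json`), but its helpers can also be driven directly. A minimal sketch, with a hypothetical log path:

```python
import argparse

# Hypothetical JSON log written by MMDetection's text logger: one JSON dict
# per line, each carrying an `epoch` field plus per-iteration metrics.
json_logs = ['work_dirs/faster_rcnn/20210101_000000.log.json']

log_dicts = load_json_logs(json_logs)
args = argparse.Namespace(json_logs=json_logs, include_outliers=False)
cal_train_time(log_dicts, args)
```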
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/dmnet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='DMHead',
-         in_channels=2048,
-         in_index=3,
-         channels=512,
-         filter_sizes=(1, 3, 5, 7),
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=dict(type='SyncBN', requires_grad=True),
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
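
This `_base_` file is meant to be inherited rather than run on its own; a concrete config lists it in `_base_` and overrides only what differs. A hedged sketch (the dataset, runtime, and schedule base files are assumed to exist in the same configs tree):

```python
# Hypothetical concrete config built on the DMNet base model settings.
_base_ = [
    '../_base_/models/dmnet_r50-d8.py',
    '../_base_/datasets/cityscapes.py',     # assumed dataset base
    '../_base_/default_runtime.py',         # assumed runtime base
    '../_base_/schedules/schedule_80k.py',  # assumed schedule base
]
# Swap in a deeper backbone, the same override the pspnet config below uses.
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
```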
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/whisper_stt/readme.md DELETED
@@ -1,15 +0,0 @@
- # whisper_stt
-
- Allows you to enter your inputs in chat mode using your microphone.
-
- ## Settings
-
- To adjust your default settings, you can add the following to your settings.yaml file.
-
- ```
- whisper_stt-whipser_language: chinese
- whisper_stt-whipser_model: tiny
- whisper_stt-auto_submit: False
- ```
-
- See the source documentation for [model names](https://github.com/openai/whisper#available-models-and-languages) and [languages](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py) you can use.
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/image_datasets.py DELETED
@@ -1,167 +0,0 @@
- import math
- import random
-
- from PIL import Image
- import blobfile as bf
- from mpi4py import MPI
- import numpy as np
- from torch.utils.data import DataLoader, Dataset
-
-
- def load_data(
-     *,
-     data_dir,
-     batch_size,
-     image_size,
-     class_cond=False,
-     deterministic=False,
-     random_crop=False,
-     random_flip=True,
- ):
-     """
-     For a dataset, create a generator over (images, kwargs) pairs.
-
-     Each image is an NCHW float tensor, and the kwargs dict contains zero or
-     more keys, each of which maps to a batched Tensor of its own.
-     The kwargs dict can be used for class labels, in which case the key is "y"
-     and the values are integer tensors of class labels.
-
-     :param data_dir: a dataset directory.
-     :param batch_size: the batch size of each returned pair.
-     :param image_size: the size to which images are resized.
-     :param class_cond: if True, include a "y" key in returned dicts for class
-                        label. If classes are not available and this is true, an
-                        exception will be raised.
-     :param deterministic: if True, yield results in a deterministic order.
-     :param random_crop: if True, randomly crop the images for augmentation.
-     :param random_flip: if True, randomly flip the images for augmentation.
-     """
-     if not data_dir:
-         raise ValueError("unspecified data directory")
-     all_files = _list_image_files_recursively(data_dir)
-     classes = None
-     if class_cond:
-         # Assume classes are the first part of the filename,
-         # before an underscore.
-         class_names = [bf.basename(path).split("_")[0] for path in all_files]
-         sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
-         classes = [sorted_classes[x] for x in class_names]
-     dataset = ImageDataset(
-         image_size,
-         all_files,
-         classes=classes,
-         shard=MPI.COMM_WORLD.Get_rank(),
-         num_shards=MPI.COMM_WORLD.Get_size(),
-         random_crop=random_crop,
-         random_flip=random_flip,
-     )
-     if deterministic:
-         loader = DataLoader(
-             dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
-         )
-     else:
-         loader = DataLoader(
-             dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
-         )
-     while True:
-         yield from loader
-
-
- def _list_image_files_recursively(data_dir):
-     results = []
-     for entry in sorted(bf.listdir(data_dir)):
-         full_path = bf.join(data_dir, entry)
-         ext = entry.split(".")[-1]
-         if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
-             results.append(full_path)
-         elif bf.isdir(full_path):
-             results.extend(_list_image_files_recursively(full_path))
-     return results
-
-
- class ImageDataset(Dataset):
-     def __init__(
-         self,
-         resolution,
-         image_paths,
-         classes=None,
-         shard=0,
-         num_shards=1,
-         random_crop=False,
-         random_flip=True,
-     ):
-         super().__init__()
-         self.resolution = resolution
-         self.local_images = image_paths[shard:][::num_shards]
-         self.local_classes = None if classes is None else classes[shard:][::num_shards]
-         self.random_crop = random_crop
-         self.random_flip = random_flip
-
-     def __len__(self):
-         return len(self.local_images)
-
-     def __getitem__(self, idx):
-         path = self.local_images[idx]
-         with bf.BlobFile(path, "rb") as f:
-             pil_image = Image.open(f)
-             pil_image.load()
-         pil_image = pil_image.convert("RGB")
-
-         if self.random_crop:
-             arr = random_crop_arr(pil_image, self.resolution)
-         else:
-             arr = center_crop_arr(pil_image, self.resolution)
-
-         if self.random_flip and random.random() < 0.5:
-             arr = arr[:, ::-1]
-
-         arr = arr.astype(np.float32) / 127.5 - 1
-
-         out_dict = {}
-         if self.local_classes is not None:
-             out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
-         return np.transpose(arr, [2, 0, 1]), out_dict
-
-
- def center_crop_arr(pil_image, image_size):
-     # We are not on a new enough PIL to support the `reducing_gap`
-     # argument, which uses BOX downsampling at powers of two first.
-     # Thus, we do it by hand to improve downsample quality.
-     while min(*pil_image.size) >= 2 * image_size:
-         pil_image = pil_image.resize(
-             tuple(x // 2 for x in pil_image.size), resample=Image.BOX
-         )
-
-     scale = image_size / min(*pil_image.size)
-     pil_image = pil_image.resize(
-         tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
-     )
-
-     arr = np.array(pil_image)
-     crop_y = (arr.shape[0] - image_size) // 2
-     crop_x = (arr.shape[1] - image_size) // 2
-     return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
-
-
- def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
-     min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
-     max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
-     smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
-
-     # We are not on a new enough PIL to support the `reducing_gap`
-     # argument, which uses BOX downsampling at powers of two first.
-     # Thus, we do it by hand to improve downsample quality.
-     while min(*pil_image.size) >= 2 * smaller_dim_size:
-         pil_image = pil_image.resize(
-             tuple(x // 2 for x in pil_image.size), resample=Image.BOX
-         )
-
-     scale = smaller_dim_size / min(*pil_image.size)
-     pil_image = pil_image.resize(
-         tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
-     )
-
-     arr = np.array(pil_image)
-     crop_y = random.randrange(arr.shape[0] - image_size + 1)
-     crop_x = random.randrange(arr.shape[1] - image_size + 1)
-     return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
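
A minimal sketch of driving the generator above; the data directory is hypothetical, and note that the loader yields forever, so batches are pulled explicitly:

```python
data = load_data(
    data_dir="datasets/my_images",  # hypothetical folder of jpg/png files
    batch_size=4,
    image_size=64,
    class_cond=False,
)
images, cond = next(data)
print(images.shape)  # torch.Size([4, 3, 64, 64]), pixel values scaled to [-1, 1]
```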
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/masked_conv.py DELETED
@@ -1,111 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import math
-
- import torch
- import torch.nn as nn
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- from torch.nn.modules.utils import _pair
-
- from ..utils import ext_loader
-
- ext_module = ext_loader.load_ext(
-     '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
-
-
- class MaskedConv2dFunction(Function):
-
-     @staticmethod
-     def symbolic(g, features, mask, weight, bias, padding, stride):
-         return g.op(
-             'mmcv::MMCVMaskedConv2d',
-             features,
-             mask,
-             weight,
-             bias,
-             padding_i=padding,
-             stride_i=stride)
-
-     @staticmethod
-     def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
-         assert mask.dim() == 3 and mask.size(0) == 1
-         assert features.dim() == 4 and features.size(0) == 1
-         assert features.size()[2:] == mask.size()[1:]
-         pad_h, pad_w = _pair(padding)
-         stride_h, stride_w = _pair(stride)
-         if stride_h != 1 or stride_w != 1:
-             raise ValueError(
-                 'Stride could only be 1 in masked_conv2d currently.')
-         out_channel, in_channel, kernel_h, kernel_w = weight.size()
-
-         batch_size = features.size(0)
-         out_h = int(
-             math.floor((features.size(2) + 2 * pad_h -
-                         (kernel_h - 1) - 1) / stride_h + 1))
-         out_w = int(
-             math.floor((features.size(3) + 2 * pad_w -
-                         (kernel_w - 1) - 1) / stride_w + 1))
-         mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
-         output = features.new_zeros(batch_size, out_channel, out_h, out_w)
-         if mask_inds.numel() > 0:
-             mask_h_idx = mask_inds[:, 0].contiguous()
-             mask_w_idx = mask_inds[:, 1].contiguous()
-             data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
-                                           mask_inds.size(0))
-             ext_module.masked_im2col_forward(
-                 features,
-                 mask_h_idx,
-                 mask_w_idx,
-                 data_col,
-                 kernel_h=kernel_h,
-                 kernel_w=kernel_w,
-                 pad_h=pad_h,
-                 pad_w=pad_w)
-
-             masked_output = torch.addmm(1, bias[:, None], 1,
-                                         weight.view(out_channel, -1), data_col)
-             ext_module.masked_col2im_forward(
-                 masked_output,
-                 mask_h_idx,
-                 mask_w_idx,
-                 output,
-                 height=out_h,
-                 width=out_w,
-                 channels=out_channel)
-         return output
-
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, grad_output):
-         return (None, ) * 5
-
-
- masked_conv2d = MaskedConv2dFunction.apply
-
-
- class MaskedConv2d(nn.Conv2d):
-     """A MaskedConv2d which inherits the official Conv2d.
-
-     The masked forward doesn't implement the backward function and only
-     supports the stride parameter to be 1 currently.
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  kernel_size,
-                  stride=1,
-                  padding=0,
-                  dilation=1,
-                  groups=1,
-                  bias=True):
-         super(MaskedConv2d,
-               self).__init__(in_channels, out_channels, kernel_size, stride,
-                              padding, dilation, groups, bias)
-
-     def forward(self, input, mask=None):
-         if mask is None:  # fallback to the normal Conv2d
-             return super(MaskedConv2d, self).forward(input)
-         else:
-             return masked_conv2d(input, mask, self.weight, self.bias,
-                                  self.padding)
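
A hedged sketch of the masked path; it needs the compiled mmcv CUDA extension, and the function asserts a batch size of 1:

```python
import torch

conv = MaskedConv2d(16, 32, kernel_size=3, padding=1).cuda()
x = torch.randn(1, 16, 64, 64, device='cuda')
mask = (torch.rand(1, 64, 64, device='cuda') > 0.5).float()  # 1 = compute here

out = conv(x, mask)   # convolution evaluated only at masked positions
dense = conv(x)       # mask=None falls back to the ordinary Conv2d path
print(out.shape, dense.shape)  # both torch.Size([1, 32, 64, 64])
```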
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/test.py DELETED
@@ -1,238 +0,0 @@
- import os.path as osp
- import pickle
- import shutil
- import tempfile
-
- import annotator.uniformer.mmcv as mmcv
- import numpy as np
- import torch
- import torch.distributed as dist
- from annotator.uniformer.mmcv.image import tensor2imgs
- from annotator.uniformer.mmcv.runner import get_dist_info
-
-
- def np2tmp(array, temp_file_name=None):
-     """Save ndarray to local numpy file.
-
-     Args:
-         array (ndarray): Ndarray to save.
-         temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
-             function will generate a file name with tempfile.NamedTemporaryFile
-             to save ndarray. Default: None.
-
-     Returns:
-         str: The numpy file name.
-     """
-
-     if temp_file_name is None:
-         temp_file_name = tempfile.NamedTemporaryFile(
-             suffix='.npy', delete=False).name
-     np.save(temp_file_name, array)
-     return temp_file_name
-
-
- def single_gpu_test(model,
-                     data_loader,
-                     show=False,
-                     out_dir=None,
-                     efficient_test=False,
-                     opacity=0.5):
-     """Test with single GPU.
-
-     Args:
-         model (nn.Module): Model to be tested.
-         data_loader (utils.data.Dataloader): Pytorch data loader.
-         show (bool): Whether to show results during inference. Default: False.
-         out_dir (str, optional): If specified, the results will be dumped into
-             the directory to save output results.
-         efficient_test (bool): Whether to save the results as local numpy
-             files to save CPU memory during evaluation. Default: False.
-         opacity (float): Opacity of painted segmentation map.
-             Default: 0.5. Must be in (0, 1] range.
-     Returns:
-         list: The prediction results.
-     """
-
-     model.eval()
-     results = []
-     dataset = data_loader.dataset
-     prog_bar = mmcv.ProgressBar(len(dataset))
-     for i, data in enumerate(data_loader):
-         with torch.no_grad():
-             result = model(return_loss=False, **data)
-
-         if show or out_dir:
-             img_tensor = data['img'][0]
-             img_metas = data['img_metas'][0].data[0]
-             imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
-             assert len(imgs) == len(img_metas)
-
-             for img, img_meta in zip(imgs, img_metas):
-                 h, w, _ = img_meta['img_shape']
-                 img_show = img[:h, :w, :]
-
-                 ori_h, ori_w = img_meta['ori_shape'][:-1]
-                 img_show = mmcv.imresize(img_show, (ori_w, ori_h))
-
-                 if out_dir:
-                     out_file = osp.join(out_dir, img_meta['ori_filename'])
-                 else:
-                     out_file = None
-
-                 model.module.show_result(
-                     img_show,
-                     result,
-                     palette=dataset.PALETTE,
-                     show=show,
-                     out_file=out_file,
-                     opacity=opacity)
-
-         if isinstance(result, list):
-             if efficient_test:
-                 result = [np2tmp(_) for _ in result]
-             results.extend(result)
-         else:
-             if efficient_test:
-                 result = np2tmp(result)
-             results.append(result)
-
-         batch_size = len(result)
-         for _ in range(batch_size):
-             prog_bar.update()
-     return results
-
-
- def multi_gpu_test(model,
-                    data_loader,
-                    tmpdir=None,
-                    gpu_collect=False,
-                    efficient_test=False):
-     """Test model with multiple gpus.
-
-     This method tests model with multiple gpus and collects the results
-     under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
-     it encodes results to gpu tensors and uses gpu communication for results
-     collection. On cpu mode it saves the results on different gpus to 'tmpdir'
-     and collects them by the rank 0 worker.
-
-     Args:
-         model (nn.Module): Model to be tested.
-         data_loader (utils.data.Dataloader): Pytorch data loader.
-         tmpdir (str): Path of directory to save the temporary results from
-             different gpus under cpu mode.
-         gpu_collect (bool): Option to use either gpu or cpu to collect results.
-         efficient_test (bool): Whether to save the results as local numpy
-             files to save CPU memory during evaluation. Default: False.
-
-     Returns:
-         list: The prediction results.
-     """
-
-     model.eval()
-     results = []
-     dataset = data_loader.dataset
-     rank, world_size = get_dist_info()
-     if rank == 0:
-         prog_bar = mmcv.ProgressBar(len(dataset))
-     for i, data in enumerate(data_loader):
-         with torch.no_grad():
-             result = model(return_loss=False, rescale=True, **data)
-
-         if isinstance(result, list):
-             if efficient_test:
-                 result = [np2tmp(_) for _ in result]
-             results.extend(result)
-         else:
-             if efficient_test:
-                 result = np2tmp(result)
-             results.append(result)
-
-         if rank == 0:
-             batch_size = data['img'][0].size(0)
-             for _ in range(batch_size * world_size):
-                 prog_bar.update()
-
-     # collect results from all ranks
-     if gpu_collect:
-         results = collect_results_gpu(results, len(dataset))
-     else:
-         results = collect_results_cpu(results, len(dataset), tmpdir)
-     return results
-
-
- def collect_results_cpu(result_part, size, tmpdir=None):
-     """Collect results with CPU."""
-     rank, world_size = get_dist_info()
-     # create a tmp dir if it is not specified
-     if tmpdir is None:
-         MAX_LEN = 512
-         # 32 is whitespace
-         dir_tensor = torch.full((MAX_LEN, ),
-                                 32,
-                                 dtype=torch.uint8,
-                                 device='cuda')
-         if rank == 0:
-             tmpdir = tempfile.mkdtemp()
-             tmpdir = torch.tensor(
-                 bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
-             dir_tensor[:len(tmpdir)] = tmpdir
-         dist.broadcast(dir_tensor, 0)
-         tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
-     else:
-         mmcv.mkdir_or_exist(tmpdir)
-     # dump the part result to the dir
-     mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
-     dist.barrier()
-     # collect all parts
-     if rank != 0:
-         return None
-     else:
-         # load results of all parts from tmp dir
-         part_list = []
-         for i in range(world_size):
-             part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
-             part_list.append(mmcv.load(part_file))
-         # sort the results
-         ordered_results = []
-         for res in zip(*part_list):
-             ordered_results.extend(list(res))
-         # the dataloader may pad some samples
-         ordered_results = ordered_results[:size]
-         # remove tmp dir
-         shutil.rmtree(tmpdir)
-         return ordered_results
-
-
- def collect_results_gpu(result_part, size):
-     """Collect results with GPU."""
-     rank, world_size = get_dist_info()
-     # dump result part to tensor with pickle
-     part_tensor = torch.tensor(
-         bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
-     # gather all result part tensor shape
-     shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
-     shape_list = [shape_tensor.clone() for _ in range(world_size)]
-     dist.all_gather(shape_list, shape_tensor)
-     # padding result part tensor to max length
-     shape_max = torch.tensor(shape_list).max()
-     part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
-     part_send[:shape_tensor[0]] = part_tensor
-     part_recv_list = [
-         part_tensor.new_zeros(shape_max) for _ in range(world_size)
-     ]
-     # gather all result part
-     dist.all_gather(part_recv_list, part_send)
-
-     if rank == 0:
-         part_list = []
-         for recv, shape in zip(part_recv_list, shape_list):
-             part_list.append(
-                 pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
-         # sort the results
-         ordered_results = []
-         for res in zip(*part_list):
-             ordered_results.extend(list(res))
-         # the dataloader may pad some samples
-         ordered_results = ordered_results[:size]
-         return ordered_results
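
A hedged sketch of the single-GPU path; the config and checkpoint paths are hypothetical, and the usual mmseg builders are assumed to be importable from this vendored package:

```python
import torch
from annotator.uniformer.mmcv import Config
from annotator.uniformer.mmseg.apis import init_segmentor
from annotator.uniformer.mmseg.datasets import build_dataloader, build_dataset

cfg = Config.fromfile('configs/my_cfg.py')               # hypothetical config
model = init_segmentor(cfg, 'checkpoints/my_ckpt.pth')   # hypothetical weights
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=2, dist=False, shuffle=False)

# single_gpu_test calls model.module.show_result, so wrap in DataParallel.
model = torch.nn.DataParallel(model, device_ids=[0])
results = single_gpu_test(model, data_loader, out_dir='vis/')
```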
spaces/ArtGAN/Diffusion-API/README.md DELETED
@@ -1,15 +0,0 @@
- ---
- title: Stable Diffusion ControlNet WebUI
- emoji: ⚡
- colorFrom: gray
- colorTo: red
- sdk: gradio
- sdk_version: 3.19.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- tags:
- - making-demos
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/img2img_app.py DELETED
@@ -1,155 +0,0 @@
- import gradio as gr
- import torch
- from diffusers import StableDiffusionImg2ImgPipeline
- from PIL import Image
-
- from diffusion_webui.utils.model_list import stable_model_list
- from diffusion_webui.utils.scheduler_list import (
-     SCHEDULER_MAPPING,
-     get_scheduler,
- )
-
-
- class StableDiffusionImage2ImageGenerator:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, stable_model_path, scheduler):
-         if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
-             self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 stable_model_path, safety_checker=None, torch_dtype=torch.float16
-             )
-
-         self.pipe.model_name = stable_model_path
-         self.pipe.scheduler_name = scheduler
-         self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
-         self.pipe.to("cuda")
-         self.pipe.enable_xformers_memory_efficient_attention()
-
-         return self.pipe
-
-     def generate_image(
-         self,
-         image_path: str,
-         stable_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         scheduler: str,
-         guidance_scale: int,
-         num_inference_step: int,
-         seed_generator=0,
-     ):
-         pipe = self.load_model(
-             stable_model_path=stable_model_path,
-             scheduler=scheduler,
-         )
-
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-         image = Image.open(image_path)
-         images = pipe(
-             prompt,
-             image=image,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             generator=generator,
-         ).images
-
-         return images
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     image2image_image_file = gr.Image(
-                         type="filepath", label="Image"
-                     ).style(height=260)
-
-                     image2image_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Prompt",
-                         show_label=False,
-                     )
-
-                     image2image_negative_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Negative Prompt",
-                         show_label=False,
-                     )
-
-                     with gr.Row():
-                         with gr.Column():
-                             image2image_model_path = gr.Dropdown(
-                                 choices=stable_model_list,
-                                 value=stable_model_list[0],
-                                 label="Stable Model Id",
-                             )
-
-                             image2image_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-                             image2image_num_inference_step = gr.Slider(
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=50,
-                                 label="Num Inference Step",
-                             )
-                         with gr.Row():
-                             with gr.Column():
-                                 image2image_scheduler = gr.Dropdown(
-                                     choices=list(SCHEDULER_MAPPING.keys()),
-                                     value=list(SCHEDULER_MAPPING.keys())[0],
-                                     label="Scheduler",
-                                 )
-                                 image2image_num_images_per_prompt = gr.Slider(
-                                     minimum=1,
-                                     maximum=4,
-                                     step=1,
-                                     value=1,
-                                     label="Number Of Images",
-                                 )
-
-                                 image2image_seed_generator = gr.Slider(
-                                     minimum=0,
-                                     maximum=1000000,
-                                     step=1,
-                                     value=0,
-                                     label="Seed(0 for random)",
-                                 )
-
-                     image2image_predict_button = gr.Button(value="Generator")
-
-                 with gr.Column():
-                     output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2))
-
-             image2image_predict_button.click(
-                 fn=StableDiffusionImage2ImageGenerator().generate_image,
-                 inputs=[
-                     image2image_image_file,
-                     image2image_model_path,
-                     image2image_prompt,
-                     image2image_negative_prompt,
-                     image2image_num_images_per_prompt,
-                     image2image_scheduler,
-                     image2image_guidance_scale,
-                     image2image_num_inference_step,
-                     image2image_seed_generator,
-                 ],
-                 outputs=[output_image],
-             )
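
The generator can also be called directly, outside the Gradio UI. A hedged sketch with a hypothetical input image:

```python
gen = StableDiffusionImage2ImageGenerator()
images = gen.generate_image(
    image_path="inputs/photo.png",           # hypothetical local file
    stable_model_path=stable_model_list[0],
    prompt="a watercolor painting",
    negative_prompt="blurry, low quality",
    num_images_per_prompt=1,
    scheduler=list(SCHEDULER_MAPPING.keys())[0],
    guidance_scale=7.5,
    num_inference_step=30,
    seed_generator=42,
)
images[0].save("outputs/result.png")
```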
spaces/ArtyomKhyan/Detection/models/experimental.py DELETED
@@ -1,109 +0,0 @@
- # This file contains experimental modules
-
- from models.common import *
-
-
- class CrossConv(nn.Module):
-     # Cross Convolution Downsample
-     def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
-         # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
-         super(CrossConv, self).__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, (1, k), (1, s))
-         self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
-         self.add = shortcut and c1 == c2
-
-     def forward(self, x):
-         return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
- class C3(nn.Module):
-     # Cross Convolution CSP
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
-         super(C3, self).__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
-         self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
-         self.cv4 = Conv(2 * c_, c2, 1, 1)
-         self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
-         self.act = nn.LeakyReLU(0.1, inplace=True)
-         self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
-
-     def forward(self, x):
-         y1 = self.cv3(self.m(self.cv1(x)))
-         y2 = self.cv2(x)
-         return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
- class Sum(nn.Module):
-     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
-     def __init__(self, n, weight=False):  # n: number of inputs
-         super(Sum, self).__init__()
-         self.weight = weight  # apply weights boolean
-         self.iter = range(n - 1)  # iter object
-         if weight:
-             self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights
-
-     def forward(self, x):
-         y = x[0]  # no weight
-         if self.weight:
-             w = torch.sigmoid(self.w) * 2
-             for i in self.iter:
-                 y = y + x[i + 1] * w[i]
-         else:
-             for i in self.iter:
-                 y = y + x[i + 1]
-         return y
-
-
- class GhostConv(nn.Module):
-     # Ghost Convolution https://github.com/huawei-noah/ghostnet
-     def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
-         super(GhostConv, self).__init__()
-         c_ = c2 // 2  # hidden channels
-         self.cv1 = Conv(c1, c_, k, s, g, act)
-         self.cv2 = Conv(c_, c_, 5, 1, c_, act)
-
-     def forward(self, x):
-         y = self.cv1(x)
-         return torch.cat([y, self.cv2(y)], 1)
-
-
- class GhostBottleneck(nn.Module):
-     # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
-     def __init__(self, c1, c2, k, s):
-         super(GhostBottleneck, self).__init__()
-         c_ = c2 // 2
-         self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
-                                   DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
-                                   GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
-         self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
-                                       Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
-
-     def forward(self, x):
-         return self.conv(x) + self.shortcut(x)
-
-
- class MixConv2d(nn.Module):
-     # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
-     def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
-         super(MixConv2d, self).__init__()
-         groups = len(k)
-         if equal_ch:  # equal c_ per group
-             i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
-             c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
-         else:  # equal weight.numel() per group
-             b = [c2] + [0] * groups
-             a = np.eye(groups + 1, groups, k=-1)
-             a -= np.roll(a, 1, axis=1)
-             a *= np.array(k) ** 2
-             a[0] = 1
-             c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
-
-         self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
-         self.bn = nn.BatchNorm2d(c2)
-         self.act = nn.LeakyReLU(0.1, inplace=True)
-
-     def forward(self, x):
-         return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
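
A minimal sketch of two of the modules above; they rely on yolov5's `Conv`/`DWConv` helpers pulled in by the wildcard import from `models.common`:

```python
import torch

x = torch.randn(1, 32, 40, 40)

gc = GhostConv(32, 64)            # half the outputs come from a cheap 5x5 depthwise conv
print(gc(x).shape)                # torch.Size([1, 64, 40, 40])

mc = MixConv2d(32, 32, k=(1, 3))  # the residual add requires c1 == c2
print(mc(x).shape)                # torch.Size([1, 32, 40, 40])
```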
spaces/AzinZ/vitscn/transforms.py DELETED
@@ -1,193 +0,0 @@
- import torch
- from torch.nn import functional as F
-
- import numpy as np
-
-
- DEFAULT_MIN_BIN_WIDTH = 1e-3
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
- DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
- def piecewise_rational_quadratic_transform(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails=None,
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-     if tails is None:
-         spline_fn = rational_quadratic_spline
-         spline_kwargs = {}
-     else:
-         spline_fn = unconstrained_rational_quadratic_spline
-         spline_kwargs = {
-             'tails': tails,
-             'tail_bound': tail_bound
-         }
-
-     outputs, logabsdet = spline_fn(
-         inputs=inputs,
-         unnormalized_widths=unnormalized_widths,
-         unnormalized_heights=unnormalized_heights,
-         unnormalized_derivatives=unnormalized_derivatives,
-         inverse=inverse,
-         min_bin_width=min_bin_width,
-         min_bin_height=min_bin_height,
-         min_derivative=min_derivative,
-         **spline_kwargs
-     )
-     return outputs, logabsdet
-
-
- def searchsorted(bin_locations, inputs, eps=1e-6):
-     bin_locations[..., -1] += eps
-     return torch.sum(
-         inputs[..., None] >= bin_locations,
-         dim=-1
-     ) - 1
-
-
- def unconstrained_rational_quadratic_spline(inputs,
-                                             unnormalized_widths,
-                                             unnormalized_heights,
-                                             unnormalized_derivatives,
-                                             inverse=False,
-                                             tails='linear',
-                                             tail_bound=1.,
-                                             min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                             min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                             min_derivative=DEFAULT_MIN_DERIVATIVE):
-     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-     outside_interval_mask = ~inside_interval_mask
-
-     outputs = torch.zeros_like(inputs)
-     logabsdet = torch.zeros_like(inputs)
-
-     if tails == 'linear':
-         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-         constant = np.log(np.exp(1 - min_derivative) - 1)
-         unnormalized_derivatives[..., 0] = constant
-         unnormalized_derivatives[..., -1] = constant
-
-         outputs[outside_interval_mask] = inputs[outside_interval_mask]
-         logabsdet[outside_interval_mask] = 0
-     else:
-         raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-     outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-         inputs=inputs[inside_interval_mask],
-         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-         inverse=inverse,
-         left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-         min_bin_width=min_bin_width,
-         min_bin_height=min_bin_height,
-         min_derivative=min_derivative
-     )
-
-     return outputs, logabsdet
-
- def rational_quadratic_spline(inputs,
-                               unnormalized_widths,
-                               unnormalized_heights,
-                               unnormalized_derivatives,
-                               inverse=False,
-                               left=0., right=1., bottom=0., top=1.,
-                               min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                               min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                               min_derivative=DEFAULT_MIN_DERIVATIVE):
-     if torch.min(inputs) < left or torch.max(inputs) > right:
-         raise ValueError('Input to a transform is not within its domain')
-
-     num_bins = unnormalized_widths.shape[-1]
-
-     if min_bin_width * num_bins > 1.0:
-         raise ValueError('Minimal bin width too large for the number of bins')
-     if min_bin_height * num_bins > 1.0:
-         raise ValueError('Minimal bin height too large for the number of bins')
-
-     widths = F.softmax(unnormalized_widths, dim=-1)
-     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-     cumwidths = torch.cumsum(widths, dim=-1)
-     cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-     cumwidths = (right - left) * cumwidths + left
-     cumwidths[..., 0] = left
-     cumwidths[..., -1] = right
-     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-     heights = F.softmax(unnormalized_heights, dim=-1)
-     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-     cumheights = torch.cumsum(heights, dim=-1)
-     cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-     cumheights = (top - bottom) * cumheights + bottom
-     cumheights[..., 0] = bottom
-     cumheights[..., -1] = top
-     heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-     if inverse:
-         bin_idx = searchsorted(cumheights, inputs)[..., None]
-     else:
-         bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-     delta = heights / widths
-     input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-     input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-     if inverse:
-         a = (((inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta)
-               + input_heights * (input_delta - input_derivatives)))
-         b = (input_heights * input_derivatives
-              - (inputs - input_cumheights) * (input_derivatives
-                                               + input_derivatives_plus_one
-                                               - 2 * input_delta))
-         c = - input_delta * (inputs - input_cumheights)
-
-         discriminant = b.pow(2) - 4 * a * c
-         assert (discriminant >= 0).all()
-
-         root = (2 * c) / (-b - torch.sqrt(discriminant))
-         outputs = root * input_bin_widths + input_cumwidths
-
-         theta_one_minus_theta = root * (1 - root)
-         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                      * theta_one_minus_theta)
-         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                      + 2 * input_delta * theta_one_minus_theta
-                                                      + input_derivatives * (1 - root).pow(2))
-         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-         return outputs, -logabsdet
-     else:
-         theta = (inputs - input_cumwidths) / input_bin_widths
-         theta_one_minus_theta = theta * (1 - theta)
-
-         numerator = input_heights * (input_delta * theta.pow(2)
-                                      + input_derivatives * theta_one_minus_theta)
-         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                      * theta_one_minus_theta)
-         outputs = input_cumheights + numerator / denominator
-
-         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                      + 2 * input_delta * theta_one_minus_theta
-                                                      + input_derivatives * (1 - theta).pow(2))
-         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-         return outputs, logabsdet
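
A hedged sketch of the unconstrained spline with linear tails. The shapes follow how VITS's `ConvFlow` calls this module: widths and heights carry `num_bins` entries per element, derivatives `num_bins - 1` (the function pads them before use):

```python
import torch

num_bins = 10
inputs = torch.randn(4, 100)  # values outside the tails simply pass through

outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths=torch.randn(4, 100, num_bins),
    unnormalized_heights=torch.randn(4, 100, num_bins),
    unnormalized_derivatives=torch.randn(4, 100, num_bins - 1),
    tails='linear',
    tail_bound=5.0,
)
print(outputs.shape, logabsdet.shape)  # torch.Size([4, 100]) for both
```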
spaces/Banbri/zcvzcv/src/app/queries/mockLLMResponse.ts DELETED
@@ -1,24 +0,0 @@
- import { LLMResponse } from "@/types"
-
- export const mockLLMResponse: LLMResponse = [
-   {
-     "panel": 1,
-     "instructions": "Close-up of cat's face, looking straight at reader with a smirk on its face",
-     "caption": "Feline mischief"
-   },
-   {
-     "panel": 2,
-     "instructions": "Medium shot of cat sniffing a glass of milk, with a surprised expression",
-     "caption": "Uh oh, what's this?"
-   },
-   {
-     "panel": 3,
-     "instructions": "Wide shot of cat knocking over the glass of milk, with a crazed look in its eyes",
-     "caption": "Cat-astrophe!"
-   },
-   {
-     "panel": 4,
-     "instructions": "Close-up of cat's face, looking satisfied with a milk moustache",
-     "caption": "Mission accomplished"
-   }
- ]
spaces/Benson/text-generation/Examples/Cmo Descargar Naruto Ultimate Ninja Storm 4 Para Android.md DELETED
@@ -1,86 +0,0 @@
- <br />
- <h1>Download Naruto Ultimate Ninja Storm Highly Compressed in 100 MB</h1>
- <p>If you are a fan of Naruto, the popular manga and anime series, you may want to try Naruto Ultimate Ninja Storm, a fighting game that lets you experience the epic battles of the ninja world. But what if you have limited storage space or a slow Internet connection? Don't worry: you can still download Naruto Ultimate Ninja Storm highly compressed in 100 MB and enjoy the game without any hassle. In this article, we will show you how to do it, and we will also give you some information about the game and its features.</p>
- <h2>What is Naruto Ultimate Ninja Storm?</h2>
- <h3>A brief introduction to the game and its features</h3>
- <p>Naruto Ultimate Ninja Storm is the first installment in the Ultimate Ninja Storm series, developed by CyberConnect2 and published by Namco Bandai Games. It was released for PlayStation 3 in 2008 and later remastered in HD for PlayStation 4, Windows, Xbox One, and Nintendo Switch.</p>
- <h2>How to download Naruto Ultimate Ninja Storm 4 for Android</h2><br /><p><b><b>Download</b> >> <a href="https://bltlly.com/2v6JSU">https://bltlly.com/2v6JSU</a></b></p><br /><br />
- <p>The game is based on Masashi Kishimoto's Naruto manga and anime series and covers the events from the beginning of the story up to the end of the Sasuke Recovery arc. It features more than 25 playable characters, each with their own moves, abilities, and transformations. It also lets players customize their characters' jutsu and select two support characters to assist them in battle.</p>
- <p>The game features an impressive 3D graphics engine that creates immersive environments and dynamic effects. It also includes a free-roaming mode that lets players explore the Hidden Leaf Village and interact with other characters, a story mode that follows the main plot of the series, and a mission mode that offers various challenges and rewards.</p>
- <h3>The benefits of downloading the game in a highly compressed format</h3>
-
- <p>That is why downloading Naruto Ultimate Ninja Storm highly compressed in 100 MB is a great solution. By compressing the game files to a smaller size, you can save up to 98% of the space without losing quality or functionality. You can also download the game faster and more easily, since it takes less time and bandwidth to transfer.</p>
- <p>Downloading Naruto Ultimate Ninja Storm highly compressed in 100 MB has another benefit as well: it can improve your gaming performance. By reducing the size of the game files, you lighten the load on your system and make it run smoother and faster. You can also avoid any lag or crashes that might occur because of large file sizes.</p>
- <h2>How do you download Naruto Ultimate Ninja Storm highly compressed in 100 MB?</h2>
- <h3>The steps to follow to download the game from a reliable source</h3>
- <p>If you want to download Naruto Ultimate Ninja Storm highly compressed in 100 MB, you need to follow these steps:</p>
- <ol>
- <li>Go to a trustworthy website that offers the game in a highly compressed format. You can use [this link] to access one of the best sources for highly compressed games.</li>
- <li>Click the download button and wait for the game file to download. The file size should be around 100 MB.</li>
- <li>Extract the game file using software such as WinRAR or 7-Zip. You will get a folder containing the game files and a setup file.</li>
- <li>Run the setup file and follow the instructions to install the game on your system. You will need to choose a destination folder and accept the terms and conditions.</li>
- <li>Once the installation is complete, you can launch the game from the desktop shortcut or the Start menu.</li>
- </ol>
- <p>Congratulations, you have successfully downloaded Naruto Ultimate Ninja Storm highly compressed in 100 MB!</p>
- <p></p>
- <h3>The system requirements and the installation process of the game</h3>
-
- <table>
- <tr><th>Minimum requirements</th><th>Recommended requirements</th></tr>
- <tr><td>OS: Windows 7 or higher (64-bit)</td><td>OS: Windows 10 (64-bit)</td></tr>
- <tr><td>CPU: Intel Core i3-530 or AMD Phenom II X4 940</td><td>CPU: Intel Core i5-6400 or AMD FX-8320</td></tr>
- <tr><td>RAM: 4 GB</td><td>RAM: 8 GB</td></tr>
- <tr><td>GPU: NVIDIA GeForce GT 730 or AMD Radeon R7 240</td><td>GPU: NVIDIA GeForce GTX 1060 or AMD Radeon RX 480</td></tr>
- <tr><td>DirectX: Version 11</td><td>DirectX: Version 11</td></tr>
- <tr><td>Storage: 6 GB of available space</td><td>Storage: 6 GB of available space</td></tr>
- <tr><td>Sound card: DirectX-compatible sound card or onboard chipset</td><td>Sound card: DirectX-compatible sound card or onboard chipset</td></tr>
- </table>
- <p>If your system meets these requirements, you can go ahead and download Naruto Ultimate Ninja Storm highly compressed in 100 MB and install it on your system. The installation process is simple and straightforward, as explained in the previous section. However, if you run into any issue or error during installation, you can try these solutions:</p>
- <ul>
- <li>Make sure your antivirus software is not blocking or deleting any game files. You may need to disable it temporarily or add an exception for the game folder.</li>
- <li>Make sure you have enough free space on your system drive and in the destination folder. You may need to delete some unwanted files or move them to another location.</li>
- <li>Make sure you have installed all the necessary drivers and updates for your system and graphics card. You may need to check the manufacturer's website for the latest versions.</li>
- <li>Make sure you have extracted the game file correctly and completely. You may need to re-download it or use a different program to extract it.</li>
- <li>Make sure you have run the setup file as an administrator. You may need to right-click it and select "Run as administrator".</li>
-
- </ul>
- <h2>How do you enjoy Naruto Ultimate Ninja Storm highly compressed in 100 MB?</h2>
- <h3>The gameplay and modes of the game</h3>
- <p>Naruto Ultimate Ninja Storm highly compressed in 100 MB offers an exciting and immersive gameplay experience that will make you feel as if you were part of the Naruto universe. The game has three main modes: Free Battle, Story Mode, and Mission Mode.</p>
- <p>In Free Battle mode, you can pick any character and fight against a computer-controlled opponent or another player. You can also customize your character's jutsu and select two support characters to help you in battle. You can choose from different stages based on locations from the series, such as Konoha, Orochimaru's Hideout, the Valley of the End, and so on, and you can adjust the difficulty level and time limit of each match.</p>
- <p>In Story Mode, you can relive the events of the Naruto series from Naruto's perspective. You can explore the Hidden Leaf Village and interact with other characters, as well as take part in battles that follow the main plot of the series. You can also unlock new characters, jutsu, and items by completing certain objectives and collecting scrolls. Story Mode covers the events from the beginning of the series up to the end of the Sasuke Recovery arc.</p>
- <p>In Mission Mode, you can take on various missions that test your skills and abilities. You can choose from different types of missions, such as survival, time attack, escort, stealth, and more. You can also earn money and rewards by completing missions and use them to buy items and accessories from the shop. Mission Mode offers a variety of challenges and scenarios that will keep you entertained and engaged.</p>
- <h3>Tips and tricks to master the game</h3>
- <p>Naruto Ultimate Ninja Storm highly compressed in 100 MB is a game that takes strategy, timing, and skill to master. Here are some tips and tricks to help you improve your play and enjoy the game more:</p>
- <ul>
- <li>Learn the basics of the combat system. You can use four buttons to perform different actions: attack, chakra, jump, and guard. You can also use the directional pad or the analog stick to move your character and dodge attacks. You can combine different buttons to perform combos, jutsu, throws, substitutions, and so on. You can also use the shoulder buttons to trigger your support characters or your ultimate jutsu.</li>
- <li>Know your character's strengths and weaknesses. Each character has their own moves, abilities, and transformations. Some characters are better at close-range combat, while others are better at long range. Some have more powerful jutsu, while others have more speed or defense. Some can transform into their Awakening state, while others can use their cursed seal or tailed beast mode. Choose a character that suits your play style and strategy.</li>
- <li>Use your chakra wisely. Chakra is the energy that lets you perform jutsu and other special moves. Your chakra meter is shown at the bottom of the screen and is depleted as you use it. You can replenish your chakra by holding down the chakra button, but this will leave you vulnerable to attacks. Balance spending and recovering your chakra, and avoid wasting it on unnecessary moves.</li>
- <li>Use your support characters effectively. Support characters are allies who can help you in battle by attacking, defending, or healing you. You can select two support characters before each match and switch between them by pressing the shoulder buttons. You can also choose between different support types: attack type, defense type, or balanced type. Attack-type supports launch powerful attacks at your opponent, defense-type supports protect you from incoming attacks, and balanced-type supports do both.</li>
-
- </ul>
- <h2>Conclusion</h2>
- <h3>A summary of the main points and a call to action</h3>
- <p>Naruto Ultimate Ninja Storm is a fantastic game that lets you experience the epic battles of the Naruto series with stunning 3D graphics and immersive gameplay. The game features more than 25 playable characters, each with their own moves, abilities, and transformations, and it has three main modes: Free Battle, Story Mode, and Mission Mode.</p>
- <p>If you want to download Naruto Ultimate Ninja Storm highly compressed in 100 MB, you can follow the steps we have provided in this article and enjoy the game without any hassle. You can save a lot of space and time by downloading the game in a highly compressed format, and also improve your gaming performance. You can also learn more about the game and its features, and master it with our tips and tricks.</p>
- <p>What are you waiting for? Download Naruto Ultimate Ninja Storm highly compressed in 100 MB today and unleash your inner ninja!</p>
- <h2>Frequently asked questions</h2>
- <h4>Q1: Is Naruto Ultimate Ninja Storm worth playing?</h4>
- <p>A1: Yes, Naruto Ultimate Ninja Storm is worth playing, especially if you are a fan of Naruto or of fighting games. The game offers a faithful adaptation of the Naruto series, with impressive graphics, immersive gameplay, and a variety of characters and modes. It is also fun and easy to play, with a simple and intuitive combat system.</p>
- <h4>Q2: How long is Naruto Ultimate Ninja Storm?</h4>
- <p>A2: Naruto Ultimate Ninja Storm is not a very long game, since it covers only the first part of the Naruto series. The story mode can be completed in about 10 hours, and the mission mode can take another 10 hours. Free Battle mode can be played indefinitely, as it offers endless matches and customization options.</p>
- <h4>Q3: Can I play Naruto Ultimate Ninja Storm online?</h4>
-
- <h4>Q4: What are the differences between Naruto Ultimate Ninja Storm and its sequels?</h4>
- <p>A4: Naruto Ultimate Ninja Storm is the first game in the Ultimate Ninja Storm series, and it differs from its sequels in a few ways. Some of the main differences are:</p>
- <ul>
- <li>The game covers only the first part of the Naruto series, while the sequels cover the second part (Shippuden) and beyond.</li>
- <li>The game has fewer playable characters than the sequels, since it only includes characters who appeared in the first part of the series.</li>
- <li>The game has no online mode, while some of the sequels do.</li>
- <li>The game has a free-roaming mode that lets you explore the Hidden Leaf Village, while the sequels take a more linear, cinematic approach to the story mode.</li>
- <li>The game has a different art style from the sequels, as it uses cel-shaded graphics that resemble the anime more closely.</li>
- </ul>
- <h4>Q5: Where can I find more compressed games?</h4>
- <p>A5: If you are looking for more highly compressed games, you can visit [this website] to find a large collection of games across various genres and platforms. You can download games in sizes ranging from 10 MB to 1 GB, depending on your preference and your system's capacity. You can also find games compatible with Windows, Android, iOS, PlayStation, Xbox, and more.</p> 64aa2da5cf<br />
- <br />
- <br />
 
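Step 3 of the guide above extracts the downloaded archive with WinRAR or 7-Zip. For completeness, the same can be done with Python's standard library; a small sketch (the archive name is a placeholder, not from the article):

    import shutil

    # "game_archive.zip" is a placeholder; shutil.unpack_archive infers the
    # format (.zip, .tar.gz, ...) from the extension. Note that RAR archives
    # are not supported by the standard library.
    shutil.unpack_archive("game_archive.zip", extract_dir="game_files")
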
spaces/Benson/text-generation/Examples/Cruce De Carretera Todo Desbloqueado Apk.md DELETED
@@ -1,90 +0,0 @@
-
- <h1>Crossy Road All Unlocked APK: How to Get All the Characters for Free</h1>
- <p>Crossy Road is one of the most addictive and fun arcade games on mobile devices. It is inspired by the classic game Frogger, but with a modern twist. You have to help a chicken or other characters cross busy roads, rivers, railway tracks, and more, while avoiding cars, trucks, trains, eagles, and other hazards. The game has retro-style pixel graphics and a huge collection of pop-art-inspired characters that you can unlock by playing or by buying coins.</p>
- <p>But what if you want to get all the characters without spending money or playing for hours? Well, there is a way to do that, but it involves using an APK file. An APK file is an Android application package that contains all the files and data needed to run an app on your device. Some people use APK files to install apps that are not available in the official app store, or to access features that are not normally available in the regular version of the app.</p>
- <h2>crossy road all unlocked apk</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash; <a href="https://bltlly.com/2v6LAs">https://bltlly.com/2v6LAs</a></b></p><br /><br />
- <p>One of those features is unlocking all the characters in Crossy Road. By using an APK file that someone else has modified, you can get all the characters for free, without having to play for them or pay for them. Sounds tempting, doesn't it? But before you rush to download and install an APK file for Crossy Road, you should know that there are some benefits and risks involved in doing so.</p>
- <h2>How to download and install the Crossy Road all-unlocked APK</h2>
- <p>If you decide to use an APK file to unlock all the characters in Crossy Road, here are the steps you need to follow:</p>
- <h3>Step 1: Find a reliable source for the APK file</h3>
-
- <h3>Step 2: Enable unknown sources on your device</h3>
- <p>By default, your device will not let you install apps from sources other than the official app store. This is a security measure to keep you from installing harmful or unauthorized apps. However, if you want to install an APK file, you need to enable unknown sources on your device. To do this, go to Settings > Security > Unknown sources and toggle it on. You may see a warning message telling you about the risks of installing apps from unknown sources. Read it carefully and tap OK if you agree.</p>
- <h3>Step 3: Download and install the APK file</h3>
- <p>Once you have enabled unknown sources on your device, you can proceed to download and install the APK file. To do this, go to the website where the APK file is hosted and tap the download button. You may see a notification telling you that this type of file can harm your device. Tap OK if you trust the source. Once the download finishes, open the file manager app on your device and locate the APK file. Tap it to start the installation process. You may see a prompt asking for permission to install the app. Tap Install and wait for the installation to finish.</p>
- <h3>Step 4: Launch the game and enjoy all the characters</h3>
- <p>After the installation, you can launch the game from the app drawer or the home screen. You should see a message telling you that you have unlocked all the characters in Crossy Road. Now you can choose any character you want and play with it. You can also switch between different characters and worlds as you like.</p>
- <h2>How to play Crossy Road with all the characters</h2>
- <p>Now that you have all the characters in Crossy Road, you may wonder how to play with them and what their special features are. Here are some tips and tricks to help you enjoy the game more:</p>
- <h3>Tips and tricks for crossing the road, avoiding obstacles, and collecting coins</h3>
-
- <p>Don't rush. Sometimes it is better to wait for a gap in the traffic or a safe spot than to move forward recklessly. However, don't wait too long, or an eagle will swoop down and grab you.</p>
- <p>Use the environment. Some objects in the environment can help you cross the road or avoid obstacles. For example, you can hop onto logs, lily pads, or ice floes to cross rivers, or use trains, cars, or rockets to move faster.</p>
- <p></p>
- <p>Collect coins. Coins are scattered throughout the game and can help you unlock more characters or buy hints. You can also get coins by watching ads or completing missions.</p>
- <p>Use hints. Hints are clues that tell you how to unlock certain characters or worlds. You can buy hints with coins or get them for free by watching ads.</p>
- <h3>Fun facts and features of different characters and worlds</h3>
- <p>One of the most appealing aspects of Crossy Road is the variety and diversity of the characters and worlds you can play with. Each character has its own personality, appearance, sound effects, and animations. Some characters also have special abilities or effects that can change the gameplay. For example, some characters can fly, swim, shoot, explode, or transform. Some characters also have secret interactions with other characters or objects in the game.</p>
- <p>Similarly, each world has its own theme, background, music, and obstacles. Some worlds are based on real places, such as Australia, China, or England, while others are based on fictional settings, such as Halloween, Space, or The Wizard of Oz. Some worlds also have hidden secrets or Easter eggs that you can discover by playing with certain characters or performing certain actions.</p>
- <p>To give you an idea of the diversity and creativity of the characters and worlds in Crossy Road, here are a few examples:</p>
- <table>
- <tr>
- <th>Character</th>
- <th>World</th>
- <th>Feature</th>
- </tr>
- <tr>
- <td>Penguin</td>
- <td>Arctic</td>
- <td></td>
- </tr>
- <tr>
- <td>Zombie</td>
- <td>Halloween</td>
- <td>Can infect other characters and turn them into zombies</td>
- </tr>
- <tr>
- <td>Cat Lady</td>
- <td>Crossy Road</td>
- <td>Has a horde of cats following her everywhere</td>
- </tr>
- <tr>
- <td>P-Switch</td>
- <td>Mario World</td>
- <td>Can turn coins into bricks and vice versa</td>
- </tr>
- <tr>
- <td>Doge</td>
- <td>Doge World</td>
- <td>Has meme-inspired sound effects and text bubbles</td>
- </tr>
- <tr>
- <td>Giraffe</td>
- <td>Savannah</td>
- <td>Has a long neck that can reach high places</td>
- </tr>
- <tr>
- <td>Marty McFly</td>
- <td>The Future</td>
- <td>Has a hoverboard that can fly over obstacles</td>
- </tr>
- <tr>
- <td>T-Rex</td><td>Jurassic World</td><td>Can roar and scare other dinosaurs</td></tr></table><h2>Conclusion</h2><p>Crossy Road is a fun and addictive game that offers endless entertainment and challenges. With an APK file, you can unlock all the characters in the game for free and enjoy playing with them across different worlds. However, you should be aware of the risks of using an APK file, such as possible malware, data theft, or legal issues. You should also respect the game's developers and support them by buying coins or characters if you can. Crossy Road is a game that deserves your appreciation and attention. Whether you use an APK file or not, we hope you have fun playing Crossy Road and discovering all the characters and worlds it has to offer.</p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions about Crossy Road and APK files:</p>
- <h3>Q: How many characters are there in Crossy Road?</h3>
- <p>A: There are more than 200 characters in Crossy Road, including animals, people, vehicles, and more. Some of them are based on pop-culture references, such as Star Wars, Harry Potter, or Minecraft. Some are also exclusive to certain platforms, such as iOS, Android, or Windows.</p>
- <h3>Q: How can I unlock characters in Crossy Road without using an APK file?</h3>
-
- <h3>Q: Is using an APK file illegal or unethical?</h3>
- <p>A: Using an APK file to unlock all the characters in Crossy Road may be considered illegal or unethical by some people. This is because you are using a modified version of the game that bypasses the original payment system and violates the game's terms of service. You are also depriving the game's developers of their rightful revenue and recognition. However, some people may argue that using an APK file is harmless and does not affect the gameplay or the quality of the game.</p>
- <h3>Q: What are the risks of using an APK file?</h3>
- <p>A: Using an APK file can expose you to some risks, such as malware, data theft, or legal issues. Malware is software that can damage your device or steal your personal information. Data theft is when someone accesses your private data without your permission. Legal issues are when you face legal consequences for violating the laws or regulations of your country or region. To avoid these risks, you should only download and install APK files from trusted sources and scan them with antivirus software before installing them.</p>
- <h3>Q: How can I uninstall an APK file?</h3>
- <p>A: If you want to uninstall an APK file from your device, you can follow these steps:</p>
- <p>Go to Settings > Apps > Crossy Road</p>
- <p>Tap Uninstall and confirm your choice</p>
- <p>Alternatively, you can also long-press the app icon on the home screen or in the app drawer and drag it to the uninstall option</p>
- <p>Note that uninstalling an APK file will delete all the data and progress associated with it. If you want to keep your data and progress, you should back them up before uninstalling.</p> 64aa2da5cf<br />
- <br />
- <br />
 
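The article above describes an APK as an Android application package containing all the files and data an app needs. One concrete, verifiable detail behind that: an APK is an ordinary ZIP archive, so its contents can be inspected with Python's standard library (a sketch; the file name is a placeholder):

    import zipfile

    # "crossyroad.apk" is a placeholder path; any valid APK is a ZIP archive.
    with zipfile.ZipFile("crossyroad.apk") as apk:
        names = apk.namelist()
        # Every well-formed APK carries a manifest (stored in a binary XML
        # encoding) and the compiled code, typically classes.dex.
        print("AndroidManifest.xml" in names, "classes.dex" in names)
        for info in apk.infolist()[:5]:
            print(info.filename, info.file_size)
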
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/build.py DELETED
@@ -1,146 +0,0 @@
- import sys
- import warnings
- from typing import TYPE_CHECKING, List, Dict
- from distutils.command.build import build as _build
-
- from setuptools import SetuptoolsDeprecationWarning
-
- if sys.version_info >= (3, 8):
-     from typing import Protocol
- elif TYPE_CHECKING:
-     from typing_extensions import Protocol
- else:
-     from abc import ABC as Protocol
-
-
- _ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"}
-
-
- class build(_build):
-     # copy to avoid sharing the object with parent class
-     sub_commands = _build.sub_commands[:]
-
-     def get_sub_commands(self):
-         subcommands = {cmd[0] for cmd in _build.sub_commands}
-         if subcommands - _ORIGINAL_SUBCOMMANDS:
-             msg = """
-             It seems that you are using `distutils.command.build` to add
-             new subcommands. Using `distutils` directly is considered deprecated,
-             please use `setuptools.command.build`.
-             """
-             warnings.warn(msg, SetuptoolsDeprecationWarning)
-             self.sub_commands = _build.sub_commands
-         return super().get_sub_commands()
-
-
- class SubCommand(Protocol):
-     """In order to support editable installations (see :pep:`660`) all
-     build subcommands **SHOULD** implement this protocol. They also **MUST** inherit
-     from ``setuptools.Command``.
-
-     When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate
-     custom ``build`` subcommands using the following procedure:
-
-     1. ``setuptools`` will set the ``editable_mode`` attribute to ``True``
-     2. ``setuptools`` will execute the ``run()`` command.
-
-     .. important::
-        Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adequate
-        its behaviour or perform optimisations.
-
-        For example, if a subcommand don't need to generate any extra file and
-        everything it does is to copy a source file into the build directory,
-        ``run()`` **SHOULD** simply "early return".
-
-        Similarly, if the subcommand creates files that would be placed alongside
-        Python files in the final distribution, during an editable install
-        the command **SHOULD** generate these files "in place" (i.e. write them to
-        the original source directory, instead of using the build directory).
-        Note that ``get_output_mapping()`` should reflect that and include mappings
-        for "in place" builds accordingly.
-
-     3. ``setuptools`` use any knowledge it can derive from the return values of
-        ``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel.
-        When relevant ``setuptools`` **MAY** attempt to use file links based on the value
-        of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use
-        :doc:`import hooks <python:reference/import>` to redirect any attempt to import
-        to the directory with the original source code and other files built in place.
-
-     Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being
-     executed (or not) to provide correct return values for ``get_outputs()``,
-     ``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should
-     work independently of ``run()``.
-     """
-
-     editable_mode: bool = False
-     """Boolean flag that will be set to ``True`` when setuptools is used for an
-     editable installation (see :pep:`660`).
-     Implementations **SHOULD** explicitly set the default value of this attribute to
-     ``False``.
-     When subcommands run, they can use this flag to perform optimizations or change
-     their behaviour accordingly.
-     """
-
-     build_lib: str
-     """String representing the directory where the build artifacts should be stored,
-     e.g. ``build/lib``.
-     For example, if a distribution wants to provide a Python module named ``pkg.mod``,
-     then a corresponding file should be written to ``{build_lib}/package/module.py``.
-     A way of thinking about this is that the files saved under ``build_lib``
-     would be eventually copied to one of the directories in :obj:`site.PREFIXES`
-     upon installation.
-
-     A command that produces platform-independent files (e.g. compiling text templates
-     into Python functions), **CAN** initialize ``build_lib`` by copying its value from
-     the ``build_py`` command. On the other hand, a command that produces
-     platform-specific files **CAN** initialize ``build_lib`` by copying its value from
-     the ``build_ext`` command. In general this is done inside the ``finalize_options``
-     method with the help of the ``set_undefined_options`` command::
-
-         def finalize_options(self):
-             self.set_undefined_options("build_py", ("build_lib", "build_lib"))
-             ...
-     """
-
-     def initialize_options(self):
-         """(Required by the original :class:`setuptools.Command` interface)"""
-
-     def finalize_options(self):
-         """(Required by the original :class:`setuptools.Command` interface)"""
-
-     def run(self):
-         """(Required by the original :class:`setuptools.Command` interface)"""
-
-     def get_source_files(self) -> List[str]:
-         """
-         Return a list of all files that are used by the command to create the expected
-         outputs.
-         For example, if your build command transpiles Java files into Python, you should
-         list here all the Java files.
-         The primary purpose of this function is to help populating the ``sdist``
-         with all the files necessary to build the distribution.
-         All files should be strings relative to the project root directory.
-         """
-
-     def get_outputs(self) -> List[str]:
-         """
-         Return a list of files intended for distribution as they would have been
-         produced by the build.
-         These files should be strings in the form of
-         ``"{build_lib}/destination/file/path"``.
-
-         .. note::
-            The return value of ``get_output()`` should include all files used as keys
-            in ``get_output_mapping()`` plus files that are generated during the build
-            and don't correspond to any source file already present in the project.
-         """
-
-     def get_output_mapping(self) -> Dict[str, str]:
-         """
-         Return a mapping between destination files as they would be produced by the
-         build (dict keys) into the respective existing (source) files (dict values).
-         Existing (source) files should be represented as strings relative to the project
-         root directory.
-         Destination files should be strings in the form of
-         ``"{build_lib}/destination/file/path"``.
-         """
 
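The `SubCommand` protocol above is mostly docstrings; a minimal sketch of a conforming subcommand may make it concrete. Everything here that is not named in the file itself (the command name, the generated file, the template path) is invented for illustration:

    from setuptools import Command
    from setuptools.command.build import build


    class build_templates(Command):
        """Hypothetical subcommand that turns templates/mod.py.in into pkg/_mod.py."""

        editable_mode: bool = False  # explicitly default to False, per the protocol

        def initialize_options(self):
            self.build_lib = None

        def finalize_options(self):
            # Inherit build_lib from build_py, as the ``build_lib`` docstring suggests.
            self.set_undefined_options("build_py", ("build_lib", "build_lib"))

        def run(self):
            if self.editable_mode:
                # For an editable install, the protocol suggests generating the
                # file "in place" in the source tree instead of under build_lib.
                return
            # ... otherwise write {build_lib}/pkg/_mod.py here ...

        def get_source_files(self):
            return ["templates/mod.py.in"]

        def get_outputs(self):
            return ["{build_lib}/pkg/_mod.py"]

        def get_output_mapping(self):
            return {"{build_lib}/pkg/_mod.py": "templates/mod.py.in"}


    # Hook it into the build sequence:
    build.sub_commands.append(("build_templates", None))
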
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/pyprojecttoml.py DELETED
@@ -1,493 +0,0 @@
- """
- Load setuptools configuration from ``pyproject.toml`` files.
-
- **PRIVATE MODULE**: API reserved for setuptools internal usage only.
- """
- import logging
- import os
- import warnings
- from contextlib import contextmanager
- from functools import partial
- from typing import TYPE_CHECKING, Callable, Dict, Optional, Mapping, Union
-
- from setuptools.errors import FileError, OptionError
-
- from . import expand as _expand
- from ._apply_pyprojecttoml import apply as _apply
- from ._apply_pyprojecttoml import _PREVIOUSLY_DEFINED, _WouldIgnoreField
-
- if TYPE_CHECKING:
-     from setuptools.dist import Distribution  # noqa
-
- _Path = Union[str, os.PathLike]
- _logger = logging.getLogger(__name__)
-
-
- def load_file(filepath: _Path) -> dict:
-     from setuptools.extern import tomli  # type: ignore
-
-     with open(filepath, "rb") as file:
-         return tomli.load(file)
-
-
- def validate(config: dict, filepath: _Path) -> bool:
-     from . import _validate_pyproject as validator
-
-     trove_classifier = validator.FORMAT_FUNCTIONS.get("trove-classifier")
-     if hasattr(trove_classifier, "_disable_download"):
-         # Improve reproducibility by default. See issue 31 for validate-pyproject.
-         trove_classifier._disable_download()  # type: ignore
-
-     try:
-         return validator.validate(config)
-     except validator.ValidationError as ex:
-         summary = f"configuration error: {ex.summary}"
-         if ex.name.strip("`") != "project":
-             # Probably it is just a field missing/misnamed, not worthy the verbosity...
-             _logger.debug(summary)
-             _logger.debug(ex.details)
-
-         error = f"invalid pyproject.toml config: {ex.name}."
-         raise ValueError(f"{error}\n{summary}") from None
-
-
- def apply_configuration(
-     dist: "Distribution",
-     filepath: _Path,
-     ignore_option_errors=False,
- ) -> "Distribution":
-     """Apply the configuration from a ``pyproject.toml`` file into an existing
-     distribution object.
-     """
-     config = read_configuration(filepath, True, ignore_option_errors, dist)
-     return _apply(dist, config, filepath)
-
-
- def read_configuration(
-     filepath: _Path,
-     expand=True,
-     ignore_option_errors=False,
-     dist: Optional["Distribution"] = None,
- ):
-     """Read given configuration file and returns options from it as a dict.
-
-     :param str|unicode filepath: Path to configuration file in the ``pyproject.toml``
-         format.
-
-     :param bool expand: Whether to expand directives and other computed values
-         (i.e. post-process the given configuration)
-
-     :param bool ignore_option_errors: Whether to silently ignore
-         options, values of which could not be resolved (e.g. due to exceptions
-         in directives such as file:, attr:, etc.).
-         If False exceptions are propagated as expected.
-
-     :param Distribution|None: Distribution object to which the configuration refers.
-         If not given a dummy object will be created and discarded after the
-         configuration is read. This is used for auto-discovery of packages in the case
-         a dynamic configuration (e.g. ``attr`` or ``cmdclass``) is expanded.
-         When ``expand=False`` this object is simply ignored.
-
-     :rtype: dict
-     """
-     filepath = os.path.abspath(filepath)
-
-     if not os.path.isfile(filepath):
-         raise FileError(f"Configuration file {filepath!r} does not exist.")
-
-     asdict = load_file(filepath) or {}
-     project_table = asdict.get("project", {})
-     tool_table = asdict.get("tool", {})
-     setuptools_table = tool_table.get("setuptools", {})
-     if not asdict or not (project_table or setuptools_table):
-         return {}  # User is not using pyproject to configure setuptools
-
-     if setuptools_table:
-         # TODO: Remove the following once the feature stabilizes:
-         msg = "Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*."
-         warnings.warn(msg, _BetaConfiguration)
-
-     # There is an overall sense in the community that making include_package_data=True
-     # the default would be an improvement.
-     # `ini2toml` backfills include_package_data=False when nothing is explicitly given,
-     # therefore setting a default here is backwards compatible.
-     orig_setuptools_table = setuptools_table.copy()
-     if dist and getattr(dist, "include_package_data") is not None:
-         setuptools_table.setdefault("include-package-data", dist.include_package_data)
-     else:
-         setuptools_table.setdefault("include-package-data", True)
-     # Persist changes:
-     asdict["tool"] = tool_table
-     tool_table["setuptools"] = setuptools_table
-
-     try:
-         # Don't complain about unrelated errors (e.g. tools not using the "tool" table)
-         subset = {"project": project_table, "tool": {"setuptools": setuptools_table}}
-         validate(subset, filepath)
-     except Exception as ex:
-         # TODO: Remove the following once the feature stabilizes:
-         if _skip_bad_config(project_table, orig_setuptools_table, dist):
-             return {}
-         # TODO: After the previous statement is removed the try/except can be replaced
-         # by the _ignore_errors context manager.
-         if ignore_option_errors:
-             _logger.debug(f"ignored error: {ex.__class__.__name__} - {ex}")
-         else:
-             raise  # re-raise exception
-
-     if expand:
-         root_dir = os.path.dirname(filepath)
-         return expand_configuration(asdict, root_dir, ignore_option_errors, dist)
-
-     return asdict
-
-
- def _skip_bad_config(
-     project_cfg: dict, setuptools_cfg: dict, dist: Optional["Distribution"]
- ) -> bool:
-     """Be temporarily forgiving with invalid ``pyproject.toml``"""
-     # See pypa/setuptools#3199 and pypa/cibuildwheel#1064
-
-     if dist is None or (
-         dist.metadata.name is None
-         and dist.metadata.version is None
-         and dist.install_requires is None
-     ):
-         # It seems that the build is not getting any configuration from other places
-         return False
-
-     if setuptools_cfg:
-         # If `[tool.setuptools]` is set, then `pyproject.toml` config is intentional
-         return False
-
-     given_config = set(project_cfg.keys())
-     popular_subset = {"name", "version", "python_requires", "requires-python"}
-     if given_config <= popular_subset:
-         # It seems that the docs in cibuildtool has been inadvertently encouraging users
-         # to create `pyproject.toml` files that are not compliant with the standards.
-         # Let's be forgiving for the time being.
-         warnings.warn(_InvalidFile.message(), _InvalidFile, stacklevel=2)
-         return True
-
-     return False
-
-
- def expand_configuration(
-     config: dict,
-     root_dir: Optional[_Path] = None,
-     ignore_option_errors: bool = False,
-     dist: Optional["Distribution"] = None,
- ) -> dict:
-     """Given a configuration with unresolved fields (e.g. dynamic, cmdclass, ...)
-     find their final values.
-
-     :param dict config: Dict containing the configuration for the distribution
-     :param str root_dir: Top-level directory for the distribution/project
-         (the same directory where ``pyproject.toml`` is place)
-     :param bool ignore_option_errors: see :func:`read_configuration`
-     :param Distribution|None: Distribution object to which the configuration refers.
-         If not given a dummy object will be created and discarded after the
-         configuration is read. Used in the case a dynamic configuration
-         (e.g. ``attr`` or ``cmdclass``).
-
-     :rtype: dict
-     """
-     return _ConfigExpander(config, root_dir, ignore_option_errors, dist).expand()
-
-
- class _ConfigExpander:
-     def __init__(
-         self,
-         config: dict,
-         root_dir: Optional[_Path] = None,
-         ignore_option_errors: bool = False,
-         dist: Optional["Distribution"] = None,
-     ):
-         self.config = config
-         self.root_dir = root_dir or os.getcwd()
-         self.project_cfg = config.get("project", {})
-         self.dynamic = self.project_cfg.get("dynamic", [])
-         self.setuptools_cfg = config.get("tool", {}).get("setuptools", {})
-         self.dynamic_cfg = self.setuptools_cfg.get("dynamic", {})
-         self.ignore_option_errors = ignore_option_errors
-         self._dist = dist
-
-     def _ensure_dist(self) -> "Distribution":
-         from setuptools.dist import Distribution
-
-         attrs = {"src_root": self.root_dir, "name": self.project_cfg.get("name", None)}
-         return self._dist or Distribution(attrs)
-
-     def _process_field(self, container: dict, field: str, fn: Callable):
-         if field in container:
-             with _ignore_errors(self.ignore_option_errors):
-                 container[field] = fn(container[field])
-
-     def _canonic_package_data(self, field="package-data"):
-         package_data = self.setuptools_cfg.get(field, {})
-         return _expand.canonic_package_data(package_data)
-
-     def expand(self):
-         self._expand_packages()
-         self._canonic_package_data()
-         self._canonic_package_data("exclude-package-data")
-
-         # A distribution object is required for discovering the correct package_dir
-         dist = self._ensure_dist()
-         ctx = _EnsurePackagesDiscovered(dist, self.project_cfg, self.setuptools_cfg)
-         with ctx as ensure_discovered:
-             package_dir = ensure_discovered.package_dir
-             self._expand_data_files()
-             self._expand_cmdclass(package_dir)
-             self._expand_all_dynamic(dist, package_dir)
-
-         return self.config
-
-     def _expand_packages(self):
-         packages = self.setuptools_cfg.get("packages")
-         if packages is None or isinstance(packages, (list, tuple)):
-             return
-
-         find = packages.get("find")
-         if isinstance(find, dict):
-             find["root_dir"] = self.root_dir
-             find["fill_package_dir"] = self.setuptools_cfg.setdefault("package-dir", {})
-             with _ignore_errors(self.ignore_option_errors):
-                 self.setuptools_cfg["packages"] = _expand.find_packages(**find)
-
-     def _expand_data_files(self):
-         data_files = partial(_expand.canonic_data_files, root_dir=self.root_dir)
-         self._process_field(self.setuptools_cfg, "data-files", data_files)
-
-     def _expand_cmdclass(self, package_dir: Mapping[str, str]):
-         root_dir = self.root_dir
-         cmdclass = partial(_expand.cmdclass, package_dir=package_dir, root_dir=root_dir)
-         self._process_field(self.setuptools_cfg, "cmdclass", cmdclass)
-
-     def _expand_all_dynamic(self, dist: "Distribution", package_dir: Mapping[str, str]):
-         special = (  # need special handling
-             "version",
-             "readme",
-             "entry-points",
-             "scripts",
-             "gui-scripts",
-             "classifiers",
-             "dependencies",
-             "optional-dependencies",
-         )
-         # `_obtain` functions are assumed to raise appropriate exceptions/warnings.
-         obtained_dynamic = {
-             field: self._obtain(dist, field, package_dir)
-             for field in self.dynamic
-             if field not in special
-         }
-         obtained_dynamic.update(
-             self._obtain_entry_points(dist, package_dir) or {},
-             version=self._obtain_version(dist, package_dir),
-             readme=self._obtain_readme(dist),
-             classifiers=self._obtain_classifiers(dist),
-             dependencies=self._obtain_dependencies(dist),
-             optional_dependencies=self._obtain_optional_dependencies(dist),
-         )
-         # `None` indicates there is nothing in `tool.setuptools.dynamic` but the value
-         # might have already been set by setup.py/extensions, so avoid overwriting.
-         updates = {k: v for k, v in obtained_dynamic.items() if v is not None}
-         self.project_cfg.update(updates)
-
-     def _ensure_previously_set(self, dist: "Distribution", field: str):
-         previous = _PREVIOUSLY_DEFINED[field](dist)
-         if previous is None and not self.ignore_option_errors:
-             msg = (
-                 f"No configuration found for dynamic {field!r}.\n"
-                 "Some dynamic fields need to be specified via `tool.setuptools.dynamic`"
-                 "\nothers must be specified via the equivalent attribute in `setup.py`."
-             )
-             raise OptionError(msg)
-
-     def _expand_directive(
-         self, specifier: str, directive, package_dir: Mapping[str, str]
-     ):
-         with _ignore_errors(self.ignore_option_errors):
-             root_dir = self.root_dir
-             if "file" in directive:
-                 return _expand.read_files(directive["file"], root_dir)
-             if "attr" in directive:
-                 return _expand.read_attr(directive["attr"], package_dir, root_dir)
-             raise ValueError(f"invalid `{specifier}`: {directive!r}")
-         return None
-
-     def _obtain(self, dist: "Distribution", field: str, package_dir: Mapping[str, str]):
-         if field in self.dynamic_cfg:
-             return self._expand_directive(
-                 f"tool.setuptools.dynamic.{field}",
-                 self.dynamic_cfg[field],
-                 package_dir,
-             )
-         self._ensure_previously_set(dist, field)
-         return None
-
-     def _obtain_version(self, dist: "Distribution", package_dir: Mapping[str, str]):
-         # Since plugins can set version, let's silently skip if it cannot be obtained
-         if "version" in self.dynamic and "version" in self.dynamic_cfg:
-             return _expand.version(self._obtain(dist, "version", package_dir))
-         return None
-
-     def _obtain_readme(self, dist: "Distribution") -> Optional[Dict[str, str]]:
-         if "readme" not in self.dynamic:
-             return None
-
-         dynamic_cfg = self.dynamic_cfg
-         if "readme" in dynamic_cfg:
-             return {
-                 "text": self._obtain(dist, "readme", {}),
-                 "content-type": dynamic_cfg["readme"].get("content-type", "text/x-rst"),
-             }
-
-         self._ensure_previously_set(dist, "readme")
-         return None
-
-     def _obtain_entry_points(
-         self, dist: "Distribution", package_dir: Mapping[str, str]
-     ) -> Optional[Dict[str, dict]]:
-         fields = ("entry-points", "scripts", "gui-scripts")
-         if not any(field in self.dynamic for field in fields):
-             return None
-
-         text = self._obtain(dist, "entry-points", package_dir)
-         if text is None:
-             return None
-
-         groups = _expand.entry_points(text)
-         expanded = {"entry-points": groups}
-
-         def _set_scripts(field: str, group: str):
-             if group in groups:
-                 value = groups.pop(group)
-                 if field not in self.dynamic:
-                     msg = _WouldIgnoreField.message(field, value)
-                     warnings.warn(msg, _WouldIgnoreField)
-                 # TODO: Don't set field when support for pyproject.toml stabilizes
-                 # instead raise an error as specified in PEP 621
-                 expanded[field] = value
-
-         _set_scripts("scripts", "console_scripts")
-         _set_scripts("gui-scripts", "gui_scripts")
-
-         return expanded
-
-     def _obtain_classifiers(self, dist: "Distribution"):
-         if "classifiers" in self.dynamic:
-             value = self._obtain(dist, "classifiers", {})
-             if value:
-                 return value.splitlines()
-         return None
-
-     def _obtain_dependencies(self, dist: "Distribution"):
-         if "dependencies" in self.dynamic:
-             value = self._obtain(dist, "dependencies", {})
-             if value:
-                 return _parse_requirements_list(value)
-         return None
-
-     def _obtain_optional_dependencies(self, dist: "Distribution"):
-         if "optional-dependencies" not in self.dynamic:
-             return None
-         if "optional-dependencies" in self.dynamic_cfg:
-             optional_dependencies_map = self.dynamic_cfg["optional-dependencies"]
-             assert isinstance(optional_dependencies_map, dict)
-             return {
-                 group: _parse_requirements_list(self._expand_directive(
-                     f"tool.setuptools.dynamic.optional-dependencies.{group}",
-                     directive,
-                     {},
-                 ))
-                 for group, directive in optional_dependencies_map.items()
-             }
-         self._ensure_previously_set(dist, "optional-dependencies")
-         return None
-
-
- def _parse_requirements_list(value):
-     return [
-         line
-         for line in value.splitlines()
-         if line.strip() and not line.strip().startswith("#")
-     ]
-
-
- @contextmanager
- def _ignore_errors(ignore_option_errors: bool):
-     if not ignore_option_errors:
-         yield
-         return
-
-     try:
-         yield
-     except Exception as ex:
-         _logger.debug(f"ignored error: {ex.__class__.__name__} - {ex}")
-
-
- class _EnsurePackagesDiscovered(_expand.EnsurePackagesDiscovered):
-     def __init__(
-         self, distribution: "Distribution", project_cfg: dict, setuptools_cfg: dict
-     ):
-         super().__init__(distribution)
-         self._project_cfg = project_cfg
-         self._setuptools_cfg = setuptools_cfg
-
-     def __enter__(self):
-         """When entering the context, the values of ``packages``, ``py_modules`` and
-         ``package_dir`` that are missing in ``dist`` are copied from ``setuptools_cfg``.
-         """
-         dist, cfg = self._dist, self._setuptools_cfg
-         package_dir: Dict[str, str] = cfg.setdefault("package-dir", {})
-         package_dir.update(dist.package_dir or {})
-         dist.package_dir = package_dir  # needs to be the same object
-
-         dist.set_defaults._ignore_ext_modules()  # pyproject.toml-specific behaviour
-
-         # Set `name`, `py_modules` and `packages` in dist to short-circuit
-         # auto-discovery, but avoid overwriting empty lists purposefully set by users.
-         if dist.metadata.name is None:
-             dist.metadata.name = self._project_cfg.get("name")
-         if dist.py_modules is None:
-             dist.py_modules = cfg.get("py-modules")
-         if dist.packages is None:
-             dist.packages = cfg.get("packages")
-
-         return super().__enter__()
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         """When exiting the context, if values of ``packages``, ``py_modules`` and
-         ``package_dir`` are missing in ``setuptools_cfg``, copy from ``dist``.
-         """
-         # If anything was discovered set them back, so they count in the final config.
-         self._setuptools_cfg.setdefault("packages", self._dist.packages)
-         self._setuptools_cfg.setdefault("py-modules", self._dist.py_modules)
-         return super().__exit__(exc_type, exc_value, traceback)
-
-
- class _BetaConfiguration(UserWarning):
-     """Explicitly inform users that some `pyproject.toml` configuration is *beta*"""
-
-
- class _InvalidFile(UserWarning):
-     """The given `pyproject.toml` file is invalid and would be ignored.
-     !!\n\n
-     ############################
-     # Invalid `pyproject.toml` #
-     ############################
-
-     Any configurations in `pyproject.toml` will be ignored.
-     Please note that future releases of setuptools will halt the build process
-     if an invalid file is given.
-
-     To prevent setuptools from considering `pyproject.toml` please
-     DO NOT include the `[project]` or `[tool.setuptools]` tables in your file.
-     \n\n!!
-     """
-
-     @classmethod
-     def message(cls):
-         from inspect import cleandoc
-         return cleandoc(cls.__doc__)
 
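The module above is marked private, but a short sketch of what it parses helps tie the pieces together. Assuming it is importable as `setuptools.config.pyprojecttoml` (true for setuptools versions that ship this file), here is a `pyproject.toml` with a dynamic `version` resolved through `tool.setuptools.dynamic`, fed through `read_configuration`:

    import pathlib
    import tempfile

    from setuptools.config.pyprojecttoml import read_configuration

    PYPROJECT = """\
    [project]
    name = "demo"
    dynamic = ["version"]

    [tool.setuptools.dynamic]
    version = {attr = "demo.__version__"}
    """

    with tempfile.TemporaryDirectory() as tmp:
        path = pathlib.Path(tmp, "pyproject.toml")
        path.write_text(PYPROJECT)
        # expand=False returns the validated dict without resolving the attr
        # directive (which would require the `demo` package to be importable).
        # A _BetaConfiguration warning is emitted because [tool.setuptools] is used.
        config = read_configuration(path, expand=False)
        print(config["project"]["name"])      # -> "demo"
        print(config["tool"]["setuptools"])   # includes the injected include-package-data=True
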
spaces/CALM/Dashboard/streamlit_observable/frontend/build/precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js DELETED
@@ -1,26 +0,0 @@
- self.__precacheManifest = (self.__precacheManifest || []).concat([
-   {
-     "revision": "1c6ba26604bc12847ab74fcdb45b2542",
-     "url": "./index.html"
-   },
-   {
-     "revision": "5a67f673dcdf30bf693d",
-     "url": "./static/js/2.b1c975ff.chunk.js"
-   },
-   {
-     "revision": "9b318b6fb13190fe82c0677e9264b3c7",
-     "url": "./static/js/2.b1c975ff.chunk.js.LICENSE.txt"
-   },
-   {
-     "revision": "3301eac1eaca974776ae",
-     "url": "./static/js/main.fc603b94.chunk.js"
-   },
-   {
-     "revision": "6515c66d2a8747a146d578e1c038a822",
-     "url": "./static/js/main.fc603b94.chunk.js.LICENSE.txt"
-   },
-   {
-     "revision": "7c26bca7e16783d14d15",
-     "url": "./static/js/runtime-main.11ec9aca.js"
-   }
- ]);
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/deform_conv.py DELETED
@@ -1,494 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import math
- from functools import lru_cache
- import torch
- from torch import nn
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- from torch.nn.modules.utils import _pair
-
- from detectron2 import _C
-
- from .wrappers import _NewEmptyTensorOp
-
-
- class _DeformConv(Function):
-     @staticmethod
-     def forward(
-         ctx,
-         input,
-         offset,
-         weight,
-         stride=1,
-         padding=0,
-         dilation=1,
-         groups=1,
-         deformable_groups=1,
-         im2col_step=64,
-     ):
-         if input is not None and input.dim() != 4:
-             raise ValueError(
-                 "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
-             )
-         ctx.stride = _pair(stride)
-         ctx.padding = _pair(padding)
-         ctx.dilation = _pair(dilation)
-         ctx.groups = groups
-         ctx.deformable_groups = deformable_groups
-         ctx.im2col_step = im2col_step
-
-         ctx.save_for_backward(input, offset, weight)
-
-         output = input.new_empty(
-             _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
-         )
-
-         ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones
-
-         if not input.is_cuda:
-             raise NotImplementedError
-         else:
-             cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
-             assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
-
-             _C.deform_conv_forward(
-                 input,
-                 weight,
-                 offset,
-                 output,
-                 ctx.bufs_[0],
-                 ctx.bufs_[1],
-                 weight.size(3),
-                 weight.size(2),
-                 ctx.stride[1],
-                 ctx.stride[0],
-                 ctx.padding[1],
-                 ctx.padding[0],
-                 ctx.dilation[1],
-                 ctx.dilation[0],
-                 ctx.groups,
-                 ctx.deformable_groups,
-                 cur_im2col_step,
-             )
-         return output
-
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, grad_output):
-         input, offset, weight = ctx.saved_tensors
-
-         grad_input = grad_offset = grad_weight = None
-
-         if not grad_output.is_cuda:
-             raise NotImplementedError
-         else:
-             cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
-             assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
-
-             if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
-                 grad_input = torch.zeros_like(input)
-                 grad_offset = torch.zeros_like(offset)
-                 _C.deform_conv_backward_input(
-                     input,
-                     offset,
-                     grad_output,
-                     grad_input,
-                     grad_offset,
-                     weight,
-                     ctx.bufs_[0],
-                     weight.size(3),
-                     weight.size(2),
-                     ctx.stride[1],
-                     ctx.stride[0],
-                     ctx.padding[1],
-                     ctx.padding[0],
-                     ctx.dilation[1],
-                     ctx.dilation[0],
-                     ctx.groups,
-                     ctx.deformable_groups,
-                     cur_im2col_step,
-                 )
-
-             if ctx.needs_input_grad[2]:
-                 grad_weight = torch.zeros_like(weight)
-                 _C.deform_conv_backward_filter(
-                     input,
-                     offset,
-                     grad_output,
-                     grad_weight,
-                     ctx.bufs_[0],
-                     ctx.bufs_[1],
-                     weight.size(3),
-                     weight.size(2),
-                     ctx.stride[1],
-                     ctx.stride[0],
-                     ctx.padding[1],
-                     ctx.padding[0],
-                     ctx.dilation[1],
-                     ctx.dilation[0],
-                     ctx.groups,
-                     ctx.deformable_groups,
-                     1,
-                     cur_im2col_step,
-                 )
-
-         return grad_input, grad_offset, grad_weight, None, None, None, None, None, None
-
-     @staticmethod
-     def _output_size(input, weight, padding, dilation, stride):
-         channels = weight.size(0)
-         output_size = (input.size(0), channels)
-         for d in range(input.dim() - 2):
-             in_size = input.size(d + 2)
-             pad = padding[d]
-             kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
-             stride_ = stride[d]
-             output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
-         if not all(map(lambda s: s > 0, output_size)):
-             raise ValueError(
-                 "convolution input is too small (output would be {})".format(
-                     "x".join(map(str, output_size))
-                 )
-             )
-         return output_size
-
-     @staticmethod
-     @lru_cache(maxsize=128)
-     def _cal_im2col_step(input_size, default_size):
-         """
-         Calculate a proper im2col step size, which should divide input_size and be no larger
-         than prefer_size. Meanwhile the step size should be as large as possible to be more
-         efficient, so we choose the largest divisor of input_size that does not exceed
-         prefer_size.
-         :param input_size: input batch size.
-         :param default_size: default preferred im2col step size.
-         :return: the largest proper step size.
-         """
-         if input_size <= default_size:
-             return input_size
-         best_step = 1
-         for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
-             if input_size % step == 0:
-                 if input_size // step <= default_size:
-                     return input_size // step
-                 best_step = step
-
-         return best_step
-
-
- class _ModulatedDeformConv(Function):
-     @staticmethod
-     def forward(
-         ctx,
-         input,
-         offset,
-         mask,
-         weight,
-         bias=None,
-         stride=1,
-         padding=0,
-         dilation=1,
-         groups=1,
-         deformable_groups=1,
-     ):
-         ctx.stride = stride
-         ctx.padding = padding
-         ctx.dilation = dilation
-         ctx.groups = groups
-         ctx.deformable_groups = deformable_groups
-         ctx.with_bias = bias is not None
-         if not ctx.with_bias:
-             bias = input.new_empty(1)  # fake tensor
-         if not input.is_cuda:
-             raise NotImplementedError
-         if (
-             weight.requires_grad
-             or mask.requires_grad
-             or offset.requires_grad
-             or input.requires_grad
-         ):
-             ctx.save_for_backward(input, offset, mask, weight, bias)
-         output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
-         ctx._bufs = [input.new_empty(0), input.new_empty(0)]
-         _C.modulated_deform_conv_forward(
-             input,
-             weight,
-             bias,
-             ctx._bufs[0],
-             offset,
-             mask,
-             output,
-             ctx._bufs[1],
-             weight.shape[2],
-             weight.shape[3],
-             ctx.stride,
-             ctx.stride,
-             ctx.padding,
-             ctx.padding,
-             ctx.dilation,
-             ctx.dilation,
-             ctx.groups,
-             ctx.deformable_groups,
-             ctx.with_bias,
-         )
-         return output
-
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, grad_output):
-         if not grad_output.is_cuda:
-             raise NotImplementedError
-         input, offset, mask, weight, bias = ctx.saved_tensors
-         grad_input = torch.zeros_like(input)
-         grad_offset = torch.zeros_like(offset)
-         grad_mask = torch.zeros_like(mask)
-         grad_weight = torch.zeros_like(weight)
-         grad_bias = torch.zeros_like(bias)
-         _C.modulated_deform_conv_backward(
-             input,
-             weight,
-             bias,
-             ctx._bufs[0],
-             offset,
-             mask,
-             ctx._bufs[1],
-             grad_input,
-             grad_weight,
-             grad_bias,
-             grad_offset,
-             grad_mask,
-             grad_output,
-             weight.shape[2],
-             weight.shape[3],
-             ctx.stride,
-             ctx.stride,
-             ctx.padding,
-             ctx.padding,
-             ctx.dilation,
-             ctx.dilation,
-             ctx.groups,
-             ctx.deformable_groups,
-             ctx.with_bias,
-         )
-         if not ctx.with_bias:
-             grad_bias = None
-
-         return (
-             grad_input,
-             grad_offset,
-             grad_mask,
-             grad_weight,
-             grad_bias,
-             None,
-             None,
-             None,
-             None,
-             None,
-         )
-
-     @staticmethod
-     def _infer_shape(ctx, input, weight):
-         n = input.size(0)
-         channels_out = weight.size(0)
-         height, width = input.shape[2:4]
-         kernel_h, kernel_w = weight.shape[2:4]
-         height_out = (
-             height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
-         ) // ctx.stride + 1
-         width_out = (
-             width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
-         ) // ctx.stride + 1
-         return n, channels_out, height_out, width_out
-
-
- deform_conv = _DeformConv.apply
- modulated_deform_conv = _ModulatedDeformConv.apply
-
-
- class DeformConv(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         out_channels,
-         kernel_size,
-         stride=1,
-         padding=0,
-         dilation=1,
-         groups=1,
-         deformable_groups=1,
-         bias=False,
-         norm=None,
-         activation=None,
-     ):
-         """
-         Deformable convolution.
-
-         Arguments are similar to :class:`Conv2D`. Extra arguments:
-
-         Args:
-             deformable_groups (int): number of groups used in deformable convolution.
-             norm (nn.Module, optional): a normalization layer
-             activation (callable(Tensor) -> Tensor): a callable activation function
-         """
-         super(DeformConv, self).__init__()
-
-         assert not bias
-         assert in_channels % groups == 0, "in_channels {} is not divisible by groups {}".format(
-             in_channels, groups
-         )
-         assert (
-             out_channels % groups == 0
-         ), "out_channels {} is not divisible by groups {}".format(out_channels, groups)
-
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.kernel_size = _pair(kernel_size)
-         self.stride = _pair(stride)
-         self.padding = _pair(padding)
-         self.dilation = _pair(dilation)
-         self.groups = groups
-         self.deformable_groups = deformable_groups
-         self.norm = norm
-         self.activation = activation
-
-         self.weight = nn.Parameter(
-             torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
-         )
-         self.bias = None
-
-         nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
-
-     def forward(self, x, offset):
-         if x.numel() == 0:
-             # When the input is empty, we want to return an empty tensor with the
-             # "correct" shape, so that the following operations will not panic
-             # if they check for the shape of the tensor.
-             # This computes the height and width of the output tensor.
-             output_shape = [
-                 (i + 2 * p - (di * (k - 1) + 1)) // s + 1
-                 for i, p, di, k, s in zip(
-                     x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
-                 )
-             ]
-             output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
-             return _NewEmptyTensorOp.apply(x, output_shape)
-
-         x = deform_conv(
-             x,
-             offset,
-             self.weight,
-             self.stride,
-             self.padding,
-             self.dilation,
-             self.groups,
-             self.deformable_groups,
-         )
-         if self.norm is not None:
-             x = self.norm(x)
-         if self.activation is not None:
-             x = self.activation(x)
-         return x
-
-     def extra_repr(self):
-         tmpstr = "in_channels=" + str(self.in_channels)
-         tmpstr += ", out_channels=" + str(self.out_channels)
-         tmpstr += ", kernel_size=" + str(self.kernel_size)
-         tmpstr += ", stride=" + str(self.stride)
-         tmpstr += ", padding=" + str(self.padding)
-         tmpstr += ", dilation=" + str(self.dilation)
-         tmpstr += ", groups=" + str(self.groups)
-         tmpstr += ", deformable_groups=" + str(self.deformable_groups)
-         tmpstr += ", bias=False"
-         return tmpstr
-
-
- class ModulatedDeformConv(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         out_channels,
-         kernel_size,
-         stride=1,
-         padding=0,
-         dilation=1,
-         groups=1,
-         deformable_groups=1,
-         bias=True,
-         norm=None,
-         activation=None,
-     ):
-         """
-         Modulated deformable convolution.
-
-         Arguments are similar to :class:`Conv2D`. Extra arguments:
-
-         Args:
-             deformable_groups (int): number of groups used in deformable convolution.
-             norm (nn.Module, optional): a normalization layer
-             activation (callable(Tensor) -> Tensor): a callable activation function
-         """
-         super(ModulatedDeformConv, self).__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.kernel_size = _pair(kernel_size)
-         self.stride = stride
-         self.padding = padding
-         self.dilation = dilation
-         self.groups = groups
-         self.deformable_groups = deformable_groups
-         self.with_bias = bias
-         self.norm = norm
-         self.activation = activation
-
-         self.weight = nn.Parameter(
-             torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
-         )
-         if bias:
-             self.bias = nn.Parameter(torch.Tensor(out_channels))
-         else:
-             self.bias = None
-
-         nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
-         if self.bias is not None:
-             nn.init.constant_(self.bias, 0)
-
-     def forward(self, x, offset, mask):
-         if x.numel() == 0:
-             # Unlike DeformConv, this class stores stride/padding/dilation as scalars,
-             # so they must be converted to pairs before zipping with the spatial dims
-             # (the original code zipped the bare scalars, which raises a TypeError).
-             output_shape = [
-                 (i + 2 * p - (di * (k - 1) + 1)) // s + 1
-                 for i, p, di, k, s in zip(
-                     x.shape[-2:],
-                     _pair(self.padding),
-                     _pair(self.dilation),
-                     self.kernel_size,
-                     _pair(self.stride),
-                 )
-             ]
-             output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
-             return _NewEmptyTensorOp.apply(x, output_shape)
-
-         x = modulated_deform_conv(
-             x,
-             offset,
-             mask,
-             self.weight,
-             self.bias,
-             self.stride,
-             self.padding,
-             self.dilation,
-             self.groups,
-             self.deformable_groups,
-         )
-         if self.norm is not None:
-             x = self.norm(x)
-         if self.activation is not None:
-             x = self.activation(x)
-         return x
-
-     def extra_repr(self):
-         tmpstr = "in_channels=" + str(self.in_channels)
-         tmpstr += ", out_channels=" + str(self.out_channels)
-         tmpstr += ", kernel_size=" + str(self.kernel_size)
-         tmpstr += ", stride=" + str(self.stride)
-         tmpstr += ", padding=" + str(self.padding)
-         tmpstr += ", dilation=" + str(self.dilation)
-         tmpstr += ", groups=" + str(self.groups)
-         tmpstr += ", deformable_groups=" + str(self.deformable_groups)
-         tmpstr += ", bias=" + str(self.with_bias)
-         return tmpstr
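
For readers skimming this deleted layer, here is a minimal usage sketch. It assumes detectron2 is installed with its CUDA extension built and a GPU is available (the ops above raise NotImplementedError on CPU); the offset/mask channel counts follow the deformable-conv convention of 2 values (dy, dx) per kernel sample for offsets and 1 per sample for masks.

# Minimal sketch, assuming detectron2's CUDA ops are built and a GPU is present.
import torch
from detectron2.layers import DeformConv, ModulatedDeformConv

N, C, H, W = 2, 16, 32, 32
k, deformable_groups = 3, 1
x = torch.randn(N, C, H, W, device="cuda")

# Offsets are normally predicted by a small conv branch; zeros are used here
# so that the deformable conv reduces to an ordinary 3x3 convolution.
offset = torch.zeros(N, deformable_groups * 2 * k * k, H, W, device="cuda")
conv = DeformConv(C, 32, kernel_size=k, padding=1).cuda()
y = conv(x, offset)

# The modulated variant additionally takes a per-sample mask; all-ones masks
# likewise reduce it to a plain convolution.
mask = torch.ones(N, deformable_groups * k * k, H, W, device="cuda")
mconv = ModulatedDeformConv(C, 32, kernel_size=k, padding=1).cuda()
y2 = mconv(x, offset, mask)
print(y.shape, y2.shape)  # torch.Size([2, 32, 32, 32]) for both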
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/config.py DELETED
@@ -1,26 +0,0 @@
- # -*- coding: utf-8 -*-
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
- from detectron2.config import CfgNode as CN
-
-
- def add_tridentnet_config(cfg):
-     """
-     Add config for TridentNet.
-     """
-     _C = cfg
-
-     _C.MODEL.TRIDENT = CN()
-
-     # Number of branches for TridentNet.
-     _C.MODEL.TRIDENT.NUM_BRANCH = 3
-     # Specify the dilations for each branch.
-     _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3]
-     # Specify the stage for applying trident blocks. The default stage is Res4,
-     # following the TridentNet paper.
-     _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4"
-     # Specify the test branch index for TridentNet Fast inference:
-     #   - use -1 to aggregate results of all branches during inference.
-     #   - otherwise, only the specified branch is used for fast inference.
-     #     The recommended setting is to use the middle branch.
-     _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1
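
A minimal sketch of consuming this helper in a standard detectron2 setup; the project-local import path is an assumption based on this project's layout.

from detectron2.config import get_cfg
# from tridentnet import add_tridentnet_config  # project-local import (assumed)

cfg = get_cfg()
add_tridentnet_config(cfg)               # registers MODEL.TRIDENT with its defaults
cfg.MODEL.TRIDENT.TEST_BRANCH_IDX = -1   # aggregate all branches at test time
print(cfg.MODEL.TRIDENT.BRANCH_DILATIONS)  # [1, 2, 3]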
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/trident_conv.py DELETED
@@ -1,107 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import torch
- from torch import nn
- from torch.nn import functional as F
- from torch.nn.modules.utils import _pair
-
- from detectron2.layers.wrappers import _NewEmptyTensorOp
-
-
- class TridentConv(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         out_channels,
-         kernel_size,
-         stride=1,
-         paddings=0,
-         dilations=1,
-         groups=1,
-         num_branch=1,
-         test_branch_idx=-1,
-         bias=False,
-         norm=None,
-         activation=None,
-     ):
-         super(TridentConv, self).__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.kernel_size = _pair(kernel_size)
-         self.num_branch = num_branch
-         self.stride = _pair(stride)
-         self.groups = groups
-         self.with_bias = bias
-         if isinstance(paddings, int):
-             paddings = [paddings] * self.num_branch
-         if isinstance(dilations, int):
-             dilations = [dilations] * self.num_branch
-         self.paddings = [_pair(padding) for padding in paddings]
-         self.dilations = [_pair(dilation) for dilation in dilations]
-         self.test_branch_idx = test_branch_idx
-         self.norm = norm
-         self.activation = activation
-
-         assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1
-
-         self.weight = nn.Parameter(
-             torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
-         )
-         if bias:
-             self.bias = nn.Parameter(torch.Tensor(out_channels))
-         else:
-             self.bias = None
-
-         nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
-         if self.bias is not None:
-             nn.init.constant_(self.bias, 0)
-
-     def forward(self, inputs):
-         num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
-         assert len(inputs) == num_branch
-
-         if inputs[0].numel() == 0:
-             # All branches share one output shape in the standard TridentNet setup
-             # (each branch's padding equals its dilation), so the first branch's
-             # values are used here. The original code referenced the nonexistent
-             # attributes self.padding/self.dilation and misspelled inputs[0].
-             output_shape = [
-                 (i + 2 * p - (di * (k - 1) + 1)) // s + 1
-                 for i, p, di, k, s in zip(
-                     inputs[0].shape[-2:],
-                     self.paddings[0],
-                     self.dilations[0],
-                     self.kernel_size,
-                     self.stride,
-                 )
-             ]
-             output_shape = [inputs[0].shape[0], self.weight.shape[0]] + output_shape
-             return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs]
-
-         if self.training or self.test_branch_idx == -1:
-             outputs = [
-                 F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups)
-                 for input, dilation, padding in zip(inputs, self.dilations, self.paddings)
-             ]
-         else:
-             outputs = [
-                 F.conv2d(
-                     inputs[0],
-                     self.weight,
-                     self.bias,
-                     self.stride,
-                     self.paddings[self.test_branch_idx],
-                     self.dilations[self.test_branch_idx],
-                     self.groups,
-                 )
-             ]
-
-         if self.norm is not None:
-             outputs = [self.norm(x) for x in outputs]
-         if self.activation is not None:
-             outputs = [self.activation(x) for x in outputs]
-         return outputs
-
-     def extra_repr(self):
-         tmpstr = "in_channels=" + str(self.in_channels)
-         tmpstr += ", out_channels=" + str(self.out_channels)
-         tmpstr += ", kernel_size=" + str(self.kernel_size)
-         tmpstr += ", num_branch=" + str(self.num_branch)
-         tmpstr += ", test_branch_idx=" + str(self.test_branch_idx)
-         tmpstr += ", stride=" + str(self.stride)
-         tmpstr += ", paddings=" + str(self.paddings)
-         tmpstr += ", dilations=" + str(self.dilations)
-         tmpstr += ", groups=" + str(self.groups)
-         tmpstr += ", bias=" + str(self.with_bias)
-         return tmpstr
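
A minimal CPU-only usage sketch of the layer above (weights are random; the import is project-local and assumed). With padding equal to dilation and a 3x3 kernel, every branch preserves the spatial size, which is why the shape check below holds.

import torch
# from tridentnet.trident_conv import TridentConv  # project-local import (assumed)

conv = TridentConv(
    in_channels=16, out_channels=32, kernel_size=3,
    paddings=[1, 2, 3], dilations=[1, 2, 3], num_branch=3, test_branch_idx=1,
)
conv.train()
inputs = [torch.randn(2, 16, 32, 32) for _ in range(3)]  # one tensor per branch
outputs = conv(inputs)                                    # all 3 branches run
assert all(o.shape == (2, 32, 32, 32) for o in outputs)

conv.eval()
out = conv([inputs[0]])  # eval mode: only the test_branch_idx branch runs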
 
spaces/CVPR/LIVE/pydiffvg/pixel_filter.py DELETED
@@ -1,9 +0,0 @@
- import torch
- import pydiffvg
-
- class PixelFilter:
-     def __init__(self,
-                  type,
-                  radius=torch.tensor(0.5)):
-         self.type = type
-         self.radius = radius
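
A minimal sketch of constructing this thin container class, assuming the diffvg/pydiffvg packages are installed; in the upstream code the filter type comes from an enum exposed by the companion diffvg module (the box member used here is an assumption based on that code).

import torch
import diffvg
import pydiffvg

# A box filter with radius 0.5 is the class default and covers one pixel.
pf = pydiffvg.PixelFilter(type=diffvg.FilterType.box, radius=torch.tensor(0.5))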
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/mismatch.h DELETED
@@ -1,22 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system has no special mismatch functions
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/get_value.h DELETED
@@ -1,23 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits get_value
- #include <thrust/system/cpp/detail/get_value.h>
-
 
spaces/CVPR/WALT/infer.py DELETED
@@ -1,118 +0,0 @@
- from argparse import ArgumentParser
-
- from mmdet.apis import inference_detector, init_detector, show_result_pyplot
- from mmdet.core.mask.utils import encode_mask_results
- import numpy as np
- import mmcv
- import torch
- from imantics import Polygons, Mask
- import json
- import os
- import cv2
- import glob
-
-
- class detections():
-     def __init__(self, cfg_path, device, model_path='data/models/walt_vehicle.pth', threshold=0.85):
-         self.model = init_detector(cfg_path, model_path, device=device)
-         self.all_preds = []
-         self.all_scores = []
-         self.index = []
-         self.score_thr = threshold
-         self.result = []
-         self.record_dict = {'model': cfg_path, 'results': []}
-         self.detect_count = []
-
-     def run_on_image(self, image):
-         self.result = inference_detector(self.model, image)
-         image_labelled = self.model.show_result(image, self.result, score_thr=self.score_thr)
-         return image_labelled
-
-     def process_output(self, count):
-         result = self.result
-         infer_result = {'url': count,
-                         'boxes': [],
-                         'scores': [],
-                         'keypoints': [],
-                         'segmentation': [],
-                         'label_ids': [],
-                         'track': [],
-                         'labels': []}
-
-         if isinstance(result, tuple):
-             bbox_result, segm_result = result
-             # segm_result = encode_mask_results(segm_result)
-             if isinstance(segm_result, tuple):
-                 segm_result = segm_result[0]  # ms rcnn
-         bboxes = np.vstack(bbox_result)
-         labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)]
-         labels = np.concatenate(labels)
-         segms = None
-         if segm_result is not None and len(labels) > 0:  # non empty
-             segms = mmcv.concat_list(segm_result)
-             if isinstance(segms[0], torch.Tensor):
-                 segms = torch.stack(segms, dim=0).detach().cpu().numpy()
-             else:
-                 segms = np.stack(segms, axis=0)
-
-         for i, (bbox, label, segm) in enumerate(zip(bboxes, labels, segms)):
-             if bbox[-1].item() < 0.3:
-                 continue
-             box = [bbox[0].item(), bbox[1].item(), bbox[2].item(), bbox[3].item()]
-             polygons = Mask(segm).polygons()
-
-             infer_result['boxes'].append(box)
-             infer_result['segmentation'].append(polygons.segmentation)
-             infer_result['scores'].append(bbox[-1].item())
-             infer_result['labels'].append(self.model.CLASSES[label])
-             infer_result['label_ids'].append(label)
-         self.record_dict['results'].append(infer_result)
-         self.detect_count = labels
-
-     def write_json(self, filename):
-         with open(filename + '.json', 'w') as f:
-             json.dump(self.record_dict, f)
-
-
- def main():
-     device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-     detect_people = detections('configs/walt/walt_people.py', device, model_path='data/models/walt_people.pth')
-     detect = detections('configs/walt/walt_vehicle.py', device, model_path='data/models/walt_vehicle.pth')
-     filenames = sorted(glob.glob('demo/images/*'))
-     count = 0
-     for filename in filenames:
-         img = cv2.imread(filename)
-         try:
-             img = detect_people.run_on_image(img)
-             img = detect.run_on_image(img)
-         except Exception:
-             continue
-         count = count + 1
-
-         # The original called the nonexistent os.mkdirs inside a bare try/except;
-         # os.makedirs with exist_ok=True does the intended job without masking errors.
-         os.makedirs(os.path.dirname(filename.replace('demo', 'demo/results/')), exist_ok=True)
-         cv2.imwrite(filename.replace('demo', 'demo/results/'), img)
-         if count == 30000:
-             break
-         try:
-             detect.process_output(count)
-         except Exception:
-             continue
-     '''
-     np.savez('FC', a=detect.record_dict)
-     with open('check.json', 'w') as f:
-         json.dump(detect.record_dict, f)
-     detect.write_json('seq3')
-     detect.process_output(0)
-     '''
-
-
- if __name__ == "__main__":
-     main()
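
Beyond the batch loop in main(), the detections class can be driven directly. A minimal sketch, assuming the WALT configs and checkpoints exist at the paths main() uses; the input and output file names here are hypothetical.

import cv2

det = detections('configs/walt/walt_vehicle.py', 'cuda:0',
                 model_path='data/models/walt_vehicle.pth', threshold=0.85)
img = cv2.imread('demo/images/example.png')   # hypothetical input path
labelled = det.run_on_image(img)              # runs inference, returns drawn image
det.process_output(0)                         # collects boxes/masks into record_dict
det.write_json('example_detections')          # writes example_detections.json
cv2.imwrite('example_labelled.png', labelled)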
 
spaces/CVPR/WALT/mmdet/core/evaluation/mean_ap.py DELETED
@@ -1,469 +0,0 @@
- from multiprocessing import Pool
-
- import mmcv
- import numpy as np
- from mmcv.utils import print_log
- from terminaltables import AsciiTable
-
- from .bbox_overlaps import bbox_overlaps
- from .class_names import get_classes
-
-
- def average_precision(recalls, precisions, mode='area'):
-     """Calculate average precision (for single or multiple scales).
-
-     Args:
-         recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
-         precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
-         mode (str): 'area' or '11points', 'area' means calculating the area
-             under the precision-recall curve, '11points' means calculating
-             the average precision of recalls at [0, 0.1, ..., 1]
-
-     Returns:
-         float or ndarray: calculated average precision
-     """
-     no_scale = False
-     if recalls.ndim == 1:
-         no_scale = True
-         recalls = recalls[np.newaxis, :]
-         precisions = precisions[np.newaxis, :]
-     assert recalls.shape == precisions.shape and recalls.ndim == 2
-     num_scales = recalls.shape[0]
-     ap = np.zeros(num_scales, dtype=np.float32)
-     if mode == 'area':
-         zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
-         ones = np.ones((num_scales, 1), dtype=recalls.dtype)
-         mrec = np.hstack((zeros, recalls, ones))
-         mpre = np.hstack((zeros, precisions, zeros))
-         for i in range(mpre.shape[1] - 1, 0, -1):
-             mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
-         for i in range(num_scales):
-             ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
-             ap[i] = np.sum(
-                 (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
-     elif mode == '11points':
-         for i in range(num_scales):
-             for thr in np.arange(0, 1 + 1e-3, 0.1):
-                 precs = precisions[i, recalls[i, :] >= thr]
-                 prec = precs.max() if precs.size > 0 else 0
-                 ap[i] += prec
-         ap /= 11
-     else:
-         raise ValueError(
-             'Unrecognized mode, only "area" and "11points" are supported')
-     if no_scale:
-         ap = ap[0]
-     return ap
-
-
- def tpfp_imagenet(det_bboxes,
-                   gt_bboxes,
-                   gt_bboxes_ignore=None,
-                   default_iou_thr=0.5,
-                   area_ranges=None):
-     """Check if detected bboxes are true positive or false positive.
-
-     Args:
-         det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
-         gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
-         gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
-             of shape (k, 4). Default: None
-         default_iou_thr (float): IoU threshold to be considered as matched for
-             medium and large bboxes (small ones have special rules).
-             Default: 0.5.
-         area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. Default: None.
-
-     Returns:
-         tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
-             each array is (num_scales, m).
-     """
-     # an indicator of ignored gts
-     gt_ignore_inds = np.concatenate(
-         (np.zeros(gt_bboxes.shape[0], dtype=bool),
-          np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
-     # stack gt_bboxes and gt_bboxes_ignore for convenience
-     gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
-
-     num_dets = det_bboxes.shape[0]
-     num_gts = gt_bboxes.shape[0]
-     if area_ranges is None:
-         area_ranges = [(None, None)]
-     num_scales = len(area_ranges)
-     # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
-     # of a certain scale.
-     tp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     fp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     if gt_bboxes.shape[0] == 0:
-         if area_ranges == [(None, None)]:
-             fp[...] = 1
-         else:
-             det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
-                 det_bboxes[:, 3] - det_bboxes[:, 1])
-             for i, (min_area, max_area) in enumerate(area_ranges):
-                 fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
-         return tp, fp
-     ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
-     gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
-     gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
-     iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
-                           default_iou_thr)
-     # sort all detections by scores in descending order
-     sort_inds = np.argsort(-det_bboxes[:, -1])
-     for k, (min_area, max_area) in enumerate(area_ranges):
-         gt_covered = np.zeros(num_gts, dtype=bool)
-         # if no area range is specified, gt_area_ignore is all False
-         if min_area is None:
-             gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
-         else:
-             gt_areas = gt_w * gt_h
-             gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
-         for i in sort_inds:
-             max_iou = -1
-             matched_gt = -1
-             # find best overlapped available gt
-             for j in range(num_gts):
-                 # different from PASCAL VOC: allow finding other gts if the
-                 # best overlapped ones are already matched by other det bboxes
-                 if gt_covered[j]:
-                     continue
-                 elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
-                     max_iou = ious[i, j]
-                     matched_gt = j
-             # there are 4 cases for a det bbox:
-             # 1. it matches a gt, tp = 1, fp = 0
-             # 2. it matches an ignored gt, tp = 0, fp = 0
-             # 3. it matches no gt and within area range, tp = 0, fp = 1
-             # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
-             if matched_gt >= 0:
-                 gt_covered[matched_gt] = 1
-                 if not (gt_ignore_inds[matched_gt]
-                         or gt_area_ignore[matched_gt]):
-                     tp[k, i] = 1
-             elif min_area is None:
-                 fp[k, i] = 1
-             else:
-                 bbox = det_bboxes[i, :4]
-                 area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
-                 if area >= min_area and area < max_area:
-                     fp[k, i] = 1
-     return tp, fp
-
-
- def tpfp_default(det_bboxes,
-                  gt_bboxes,
-                  gt_bboxes_ignore=None,
-                  iou_thr=0.5,
-                  area_ranges=None):
-     """Check if detected bboxes are true positive or false positive.
-
-     Args:
-         det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
-         gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
-         gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
-             of shape (k, 4). Default: None
-         iou_thr (float): IoU threshold to be considered as matched.
-             Default: 0.5.
-         area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. Default: None.
-
-     Returns:
-         tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
-             each array is (num_scales, m).
-     """
-     # an indicator of ignored gts
-     gt_ignore_inds = np.concatenate(
-         (np.zeros(gt_bboxes.shape[0], dtype=bool),
-          np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
-     # stack gt_bboxes and gt_bboxes_ignore for convenience
-     gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
-
-     num_dets = det_bboxes.shape[0]
-     num_gts = gt_bboxes.shape[0]
-     if area_ranges is None:
-         area_ranges = [(None, None)]
-     num_scales = len(area_ranges)
-     # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
-     # a certain scale
-     tp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     fp = np.zeros((num_scales, num_dets), dtype=np.float32)
-
-     # if there is no gt bboxes in this image, then all det bboxes
-     # within area range are false positives
-     if gt_bboxes.shape[0] == 0:
-         if area_ranges == [(None, None)]:
-             fp[...] = 1
-         else:
-             det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
-                 det_bboxes[:, 3] - det_bboxes[:, 1])
-             for i, (min_area, max_area) in enumerate(area_ranges):
-                 fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
-         return tp, fp
-
-     ious = bbox_overlaps(det_bboxes, gt_bboxes)
-     # for each det, the max iou with all gts
-     ious_max = ious.max(axis=1)
-     # for each det, which gt overlaps most with it
-     ious_argmax = ious.argmax(axis=1)
-     # sort all dets in descending order by scores
-     sort_inds = np.argsort(-det_bboxes[:, -1])
-     for k, (min_area, max_area) in enumerate(area_ranges):
-         gt_covered = np.zeros(num_gts, dtype=bool)
-         # if no area range is specified, gt_area_ignore is all False
-         if min_area is None:
-             gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
-         else:
-             gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
-                 gt_bboxes[:, 3] - gt_bboxes[:, 1])
-             gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
-         for i in sort_inds:
-             if ious_max[i] >= iou_thr:
-                 matched_gt = ious_argmax[i]
-                 if not (gt_ignore_inds[matched_gt]
-                         or gt_area_ignore[matched_gt]):
-                     if not gt_covered[matched_gt]:
-                         gt_covered[matched_gt] = True
-                         tp[k, i] = 1
-                     else:
-                         fp[k, i] = 1
-                 # otherwise ignore this detected bbox, tp = 0, fp = 0
-             elif min_area is None:
-                 fp[k, i] = 1
-             else:
-                 bbox = det_bboxes[i, :4]
-                 area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
-                 if area >= min_area and area < max_area:
-                     fp[k, i] = 1
-     return tp, fp
-
-
- def get_cls_results(det_results, annotations, class_id):
-     """Get det results and gt information of a certain class.
-
-     Args:
-         det_results (list[list]): Same as `eval_map()`.
-         annotations (list[dict]): Same as `eval_map()`.
-         class_id (int): ID of a specific class.
-
-     Returns:
-         tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
-     """
-     cls_dets = [img_res[class_id] for img_res in det_results]
-     cls_gts = []
-     cls_gts_ignore = []
-     for ann in annotations:
-         gt_inds = ann['labels'] == class_id
-         cls_gts.append(ann['bboxes'][gt_inds, :])
-
-         if ann.get('labels_ignore', None) is not None:
-             ignore_inds = ann['labels_ignore'] == class_id
-             cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
-         else:
-             cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
-
-     return cls_dets, cls_gts, cls_gts_ignore
-
-
- def eval_map(det_results,
-              annotations,
-              scale_ranges=None,
-              iou_thr=0.5,
-              dataset=None,
-              logger=None,
-              tpfp_fn=None,
-              nproc=4):
-     """Evaluate mAP of a dataset.
-
-     Args:
-         det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
-             The outer list indicates images, and the inner list indicates
-             per-class detected bboxes.
-         annotations (list[dict]): Ground truth annotations where each item of
-             the list indicates an image. Keys of annotations are:
-
-             - `bboxes`: numpy array of shape (n, 4)
-             - `labels`: numpy array of shape (n, )
-             - `bboxes_ignore` (optional): numpy array of shape (k, 4)
-             - `labels_ignore` (optional): numpy array of shape (k, )
-         scale_ranges (list[tuple] | None): Range of scales to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. A range of
-             (32, 64) means the area range between (32**2, 64**2).
-             Default: None.
-         iou_thr (float): IoU threshold to be considered as matched.
-             Default: 0.5.
-         dataset (list[str] | str | None): Dataset name or dataset classes,
-             there are minor differences in metrics for different datasets,
-             e.g. "voc07", "imagenet_det", etc. Default: None.
-         logger (logging.Logger | str | None): The way to print the mAP
-             summary. See `mmcv.utils.print_log()` for details. Default: None.
-         tpfp_fn (callable | None): The function used to determine true/
-             false positives. If None, :func:`tpfp_default` is used as default
-             unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
-             case). If it is given as a function, then this function is used
-             to evaluate tp & fp. Default None.
-         nproc (int): Processes used for computing TP and FP.
-             Default: 4.
-
-     Returns:
-         tuple: (mAP, [dict, dict, ...])
-     """
-     assert len(det_results) == len(annotations)
-
-     num_imgs = len(det_results)
-     num_scales = len(scale_ranges) if scale_ranges is not None else 1
-     num_classes = len(det_results[0])  # positive class num
-     area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
-                    if scale_ranges is not None else None)
-
-     pool = Pool(nproc)
-     eval_results = []
-     for i in range(num_classes):
-         # get gt and det bboxes of this class
-         cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
-             det_results, annotations, i)
-         # choose proper function according to datasets to compute tp and fp
-         if tpfp_fn is None:
-             if dataset in ['det', 'vid']:
-                 tpfp_fn = tpfp_imagenet
-             else:
-                 tpfp_fn = tpfp_default
-         if not callable(tpfp_fn):
-             raise ValueError(
-                 f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
-
-         # compute tp and fp for each image with multiple processes
-         tpfp = pool.starmap(
-             tpfp_fn,
-             zip(cls_dets, cls_gts, cls_gts_ignore,
-                 [iou_thr for _ in range(num_imgs)],
-                 [area_ranges for _ in range(num_imgs)]))
-         tp, fp = tuple(zip(*tpfp))
-         # calculate gt number of each scale
-         # ignored gts or gts beyond the specific scale are not counted
-         num_gts = np.zeros(num_scales, dtype=int)
-         for j, bbox in enumerate(cls_gts):
-             if area_ranges is None:
-                 num_gts[0] += bbox.shape[0]
-             else:
-                 gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
-                     bbox[:, 3] - bbox[:, 1])
-                 for k, (min_area, max_area) in enumerate(area_ranges):
-                     num_gts[k] += np.sum((gt_areas >= min_area)
-                                          & (gt_areas < max_area))
-         # sort all det bboxes by score, also sort tp and fp
-         cls_dets = np.vstack(cls_dets)
-         num_dets = cls_dets.shape[0]
-         sort_inds = np.argsort(-cls_dets[:, -1])
-         tp = np.hstack(tp)[:, sort_inds]
-         fp = np.hstack(fp)[:, sort_inds]
-         # calculate recall and precision with tp and fp
-         tp = np.cumsum(tp, axis=1)
-         fp = np.cumsum(fp, axis=1)
-         eps = np.finfo(np.float32).eps
-         recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
-         precisions = tp / np.maximum((tp + fp), eps)
-         # calculate AP
-         if scale_ranges is None:
-             recalls = recalls[0, :]
-             precisions = precisions[0, :]
-             num_gts = num_gts.item()
-         mode = 'area' if dataset != 'voc07' else '11points'
-         ap = average_precision(recalls, precisions, mode)
-         eval_results.append({
-             'num_gts': num_gts,
-             'num_dets': num_dets,
-             'recall': recalls,
-             'precision': precisions,
-             'ap': ap
-         })
-     pool.close()
-     if scale_ranges is not None:
-         # shape (num_classes, num_scales)
-         all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
-         all_num_gts = np.vstack(
-             [cls_result['num_gts'] for cls_result in eval_results])
-         mean_ap = []
-         for i in range(num_scales):
-             if np.any(all_num_gts[:, i] > 0):
-                 mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
-             else:
-                 mean_ap.append(0.0)
-     else:
-         aps = []
-         for cls_result in eval_results:
-             if cls_result['num_gts'] > 0:
-                 aps.append(cls_result['ap'])
-         mean_ap = np.array(aps).mean().item() if aps else 0.0
-
-     print_map_summary(
-         mean_ap, eval_results, dataset, area_ranges, logger=logger)
-
-     return mean_ap, eval_results
-
-
- def print_map_summary(mean_ap,
-                       results,
-                       dataset=None,
-                       scale_ranges=None,
-                       logger=None):
-     """Print mAP and results of each class.
-
-     A table will be printed to show the gts/dets/recall/AP of each class and
-     the mAP.
-
-     Args:
-         mean_ap (float): Calculated from `eval_map()`.
-         results (list[dict]): Calculated from `eval_map()`.
-         dataset (list[str] | str | None): Dataset name or dataset classes.
-         scale_ranges (list[tuple] | None): Range of scales to be evaluated.
-         logger (logging.Logger | str | None): The way to print the mAP
-             summary. See `mmcv.utils.print_log()` for details. Default: None.
-     """
-
-     if logger == 'silent':
-         return
-
-     if isinstance(results[0]['ap'], np.ndarray):
-         num_scales = len(results[0]['ap'])
-     else:
-         num_scales = 1
-
-     if scale_ranges is not None:
-         assert len(scale_ranges) == num_scales
-
-     num_classes = len(results)
-
-     recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
-     aps = np.zeros((num_scales, num_classes), dtype=np.float32)
-     num_gts = np.zeros((num_scales, num_classes), dtype=int)
-     for i, cls_result in enumerate(results):
-         if cls_result['recall'].size > 0:
-             recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
-         aps[:, i] = cls_result['ap']
-         num_gts[:, i] = cls_result['num_gts']
-
-     if dataset is None:
-         label_names = [str(i) for i in range(num_classes)]
-     elif mmcv.is_str(dataset):
-         label_names = get_classes(dataset)
-     else:
-         label_names = dataset
-
-     if not isinstance(mean_ap, list):
-         mean_ap = [mean_ap]
-
-     header = ['class', 'gts', 'dets', 'recall', 'ap']
-     for i in range(num_scales):
-         if scale_ranges is not None:
-             print_log(f'Scale range {scale_ranges[i]}', logger=logger)
-         table_data = [header]
-         for j in range(num_classes):
-             row_data = [
-                 label_names[j], num_gts[i, j], results[j]['num_dets'],
-                 f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
-             ]
-             table_data.append(row_data)
-         table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
-         table = AsciiTable(table_data)
-         table.inner_footing_row_border = True
-         print_log('\n' + table.table, logger=logger)
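
A small toy sanity check of the evaluation code above (all numbers are made up). A detector whose precision stays at 1.0 over every recall level has AP 1.0 under the 'area' mode, and a single exact-match detection yields mAP 1.0 from eval_map. Since eval_map spawns a multiprocessing Pool, run this under a main guard on platforms that use the spawn start method.

import numpy as np

# Perfect detector: precision is 1.0 at every recall level, so AP = 1.0.
recalls = np.array([0.25, 0.5, 0.75, 1.0], dtype=np.float32)
precisions = np.ones_like(recalls)
print(average_precision(recalls, precisions, mode='area'))  # 1.0

# One image, one class, one detection that exactly matches the single gt box.
det_results = [[np.array([[10, 10, 50, 50, 0.9]], dtype=np.float32)]]
annotations = [dict(
    bboxes=np.array([[10, 10, 50, 50]], dtype=np.float32),
    labels=np.array([0], dtype=np.int64),
)]
mean_ap, _ = eval_map(det_results, annotations, iou_thr=0.5, logger='silent')
print(mean_ap)  # 1.0 for this toy case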