parquet-converter commited on
Commit
7c99590
·
1 Parent(s): e10281c

Update parquet files (step 30 of 397)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/17TheWord/vits-models/text/__init__.py +0 -57
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Buildbox 2.2.8 BETA - Cracked Serial Key Download and Install Guide.md +0 -51
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/ETKA V7.3 VW AUDI SKODA SEAT MULTI PC Francaisl UPD.md +0 -175
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fxhome Photokey 6 Pro Mac Crack Torrent ((EXCLUSIVE)).md +0 -194
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Game Point Blank Strike di Laptop Tanpa Ribet.md +0 -123
  6. spaces/1phancelerku/anime-remove-background/APK Mody Free Download MOD APK Games and Apps for Android.md +0 -155
  7. spaces/1phancelerku/anime-remove-background/Barbie Influencer Makeup and Dress Up Games for TikTok Fans.md +0 -74
  8. spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/unittest.py +0 -29
  9. spaces/A00001/bingothoo/src/components/chat-message.tsx +0 -93
  10. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/__init__.py +0 -25
  11. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/dataset.py +0 -147
  12. spaces/AP123/IllusionDiffusion/README.md +0 -15
  13. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/base.py +0 -107
  14. spaces/Abduhoshim/speech_emotion_detection/README.md +0 -12
  15. spaces/Abhaykoul/Wizard-AI/app.py +0 -56
  16. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/1.js +0 -1
  17. spaces/Admin08077/Cosmosis/README.md +0 -16
  18. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/sde_team.py +0 -48
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetChildrenSizers.js +0 -15
  20. spaces/Alcedo/yunmedia/common.js +0 -173
  21. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/批量总结PDF文档.py +0 -154
  22. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/editor.py +0 -507
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ipndm.md +0 -20
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines.py +0 -1745
  25. spaces/Andy1621/uniformer_image_detection/mmdet/core/mask/mask_target.py +0 -122
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/__init__.py +0 -42
  27. spaces/Arnx/MusicGenXvAKN/audiocraft/modules/streaming.py +0 -135
  28. spaces/ArtyomKhyan/Detection/README.md +0 -12
  29. spaces/Avin1221/darkstorm2150-Protogen_x3.4_Official_Release/app.py +0 -3
  30. spaces/AzumaSeren100/XuanShen-Bert-VITS2/data_utils.py +0 -328
  31. spaces/Benson/text-generation/Examples/Cuentos De Espacio Mutante Blobs Ataque Ps Vita.md +0 -127
  32. spaces/Buatong/Computing/README.md +0 -12
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/_static/mathjax_mathml.user.js +0 -18
  34. spaces/CVPR/Text2Human/Text2Human/data/__init__.py +0 -0
  35. spaces/CVPR/WALT/mmdet/models/dense_heads/reppoints_head.py +0 -763
  36. spaces/Cambino/dog-classifier-gradio/DogBreedClassifier.py +0 -39
  37. spaces/Chirag4579/prakalpa-image-comparator/setup.sh +0 -18
  38. spaces/CikeyQI/Yunzai/Yunzai/lib/events/online.js +0 -18
  39. spaces/Crow34/Joi/README.md +0 -13
  40. spaces/DGSpitzer/TXT-2-IMG-2-MUSIC-2-VIDEO-w-RIFFUSION/README.md +0 -14
  41. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/V_D_M_X_.py +0 -241
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/tests/helpers.py +0 -101
  43. spaces/DaFujaTyping/hf-Chat-ui/README.md +0 -71
  44. spaces/DaFujaTyping/hf-Chat-ui/src/styles/highlight-js.css +0 -1
  45. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/swin_transformer.py +0 -43
  46. spaces/Detomo/aisatsu-app-api/app.py +0 -52
  47. spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/kalmanFilter.cpp +0 -152
  48. spaces/ElAnon/6btest/README.md +0 -12
  49. spaces/Eroggen/ChatGPT4/README.md +0 -14
  50. spaces/EsoCode/text-generation-webui/modules/logging_colors.py +0 -117
spaces/17TheWord/vits-models/text/__init__.py DELETED
@@ -1,57 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
- from text import cleaners
3
- from text.symbols import symbols
4
-
5
-
6
- # Mappings from symbol to numeric ID and vice versa:
7
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
8
- _id_to_symbol = {i: s for i, s in enumerate(symbols)}
9
-
10
-
11
- def text_to_sequence(text, symbols, cleaner_names):
12
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
13
- Args:
14
- text: string to convert to a sequence
15
- cleaner_names: names of the cleaner functions to run the text through
16
- Returns:
17
- List of integers corresponding to the symbols in the text
18
- '''
19
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
20
- sequence = []
21
-
22
- clean_text = _clean_text(text, cleaner_names)
23
- for symbol in clean_text:
24
- if symbol not in _symbol_to_id.keys():
25
- continue
26
- symbol_id = _symbol_to_id[symbol]
27
- sequence += [symbol_id]
28
- return sequence, clean_text
29
-
30
-
31
- def cleaned_text_to_sequence(cleaned_text):
32
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
33
- Args:
34
- text: string to convert to a sequence
35
- Returns:
36
- List of integers corresponding to the symbols in the text
37
- '''
38
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
39
- return sequence
40
-
41
-
42
- def sequence_to_text(sequence):
43
- '''Converts a sequence of IDs back to a string'''
44
- result = ''
45
- for symbol_id in sequence:
46
- s = _id_to_symbol[symbol_id]
47
- result += s
48
- return result
49
-
50
-
51
- def _clean_text(text, cleaner_names):
52
- for name in cleaner_names:
53
- cleaner = getattr(cleaners, name)
54
- if not cleaner:
55
- raise Exception('Unknown cleaner: %s' % name)
56
- text = cleaner(text)
57
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Buildbox 2.2.8 BETA - Cracked Serial Key Download and Install Guide.md DELETED
@@ -1,51 +0,0 @@
1
-
2
- <h1>Buildbox 2.2.8 BETA - Cracked Serial Key</h1>
3
- <p>Do you want to create amazing games without coding? Do you want to access the latest features and updates of Buildbox, one of the most popular game development software in the market? Do you want to save money and time by using a cracked serial key for Buildbox 2.2.8 BETA? If you answered yes to any of these questions, then this article is for you.</p>
4
- <h2>Buildbox 2.2.8 BETA - Cracked Serial Key</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://byltly.com/2uKyhc">https://byltly.com/2uKyhc</a></b></p><br /><br />
5
- <p>In this article, we will tell you everything you need to know about Buildbox 2.2.8 BETA - cracked serial key, including what is Buildbox and why you need it, what is new in Buildbox 2.2.8 BETA, how to get Buildbox 2.2.8 BETA cracked serial key, pros and cons of using cracked serial key, and conclusion and FAQs.</p>
6
- <p>By the end of this article, you will have a clear idea of whether you should use a cracked serial key for Buildbox 2.2.8 BETA or not, and how to do it safely and effectively.</p>
7
- <h2>What is Buildbox and why you need it</h2>
8
- <p>Buildbox is a game development software that allows anyone to create games without coding or programming skills.</p>
9
- <p>How to download Buildbox 2.2.8 BETA cracked version<br />
10
- Buildbox 2.2.8 BETA activation code generator<br />
11
- Buildbox 2.2.8 BETA full version free download<br />
12
- Buildbox 2.2.8 BETA crack patch keygen<br />
13
- Buildbox 2.2.8 BETA license key crack<br />
14
- Buildbox 2.2.8 BETA torrent download link<br />
15
- Buildbox 2.2.8 BETA crack for Windows 10<br />
16
- Buildbox 2.2.8 BETA crack for Mac OS X<br />
17
- Buildbox 2.2.8 BETA serial number crack<br />
18
- Buildbox 2.2.8 BETA registration code crack<br />
19
- Buildbox 2.2.8 BETA crack no survey<br />
20
- Buildbox 2.2.8 BETA crack online<br />
21
- Buildbox 2.2.8 BETA crack offline<br />
22
- Buildbox 2.2.8 BETA crack without virus<br />
23
- Buildbox 2.2.8 BETA crack working 100%<br />
24
- Buildbox 2.2.8 BETA latest version crack<br />
25
- Buildbox 2.2.8 BETA updated version crack<br />
26
- Buildbox 2.2.8 BETA crack with tutorial<br />
27
- Buildbox 2.2.8 BETA crack with proof<br />
28
- Buildbox 2.2.8 BETA crack with support<br />
29
- Buildbox 2.2.8 BETA game maker software crack<br />
30
- Buildbox 2.2.8 BETA game engine crack<br />
31
- Buildbox 2.2.8 BETA game development tool crack<br />
32
- Buildbox 2.2.8 BETA game creation software crack<br />
33
- Buildbox 2.2.8 BETA drag and drop game builder crack<br />
34
- Buildbox 2.2.8 BETA no code game maker crack<br />
35
- Buildbox 2.2.8 BETA easy game maker crack<br />
36
- Buildbox 2.2.8 BETA best game maker crack<br />
37
- Buildbox 2.2.8 BETA professional game maker crack<br />
38
- Buildbox 2.2.8 BETA powerful game maker crack<br />
39
- Buildbox 2.2.8 BETA advanced game maker crack<br />
40
- Buildbox 2.2.8 BETA simple game maker crack<br />
41
- Buildbox 2.2.8 BETA fast game maker crack<br />
42
- Buildbox 2.2.8 BETA fun game maker crack<br />
43
- Buildbox 2.</p>
44
- <p>With Buildbox, you can drag and drop assets, add animations, sounds, effects, logic, and more to create your own games in minutes.</p>
45
- <p>Buildbox also has a powerful physics engine, a smart asset library, a node-based editor, an animation timeline, a monetization system, an export feature, and many other tools that make game creation easy and fun.</p>
46
- <p>Buildbox is used by thousands of game developers around the world, from beginners to professionals.</p>
47
- <p>Some of the most successful games made with Buildbox include Color Switch, The Line Zen, Phases, Sky, Ball Jump, Slip Away, Switchy Sides, Endless Sky, Damn Daniel, Hoppy Frog, Miximal Adventure, Void Troopers, Drop Out!, Nerves, The Pit, Jelly Jump, Zig Zag Boom, Skyward Journey, Ball Blast!, Rise Up!, Hoop Smash!, Flip Runner!, Dunk Shot!, Dune!, Rider!, Ballz!, Stack!, Twisty Road!, Flappy Dunk!, Snake vs Block!, Rolling Sky!, Dancing Line!, Piano Tiles 2!, Geometry Dash SubZero!, Crossy Road!, Subway Surfers!, Temple Run!, Angry Birds!, Fruit Ninja!, Jetpack Joyride!, Cut The Rope!, Plants vs Zombies!, Candy Crush Saga! , Clash Of Clans! , Minecraft! , Fortnite! , PUBG! , Among Us! , Roblox! , GTA! , Call Of Duty! , FIFA! , NBA! , Pokemon Go! , Super Mario Run! , Sonic Dash! , Asphalt 9! , CSR Racing 2! , Need For Speed! , Real Racing 3! , Hill Climb Racing! , Bike Race! , Traffic Rider! , Trials Frontier! , Alto's Adventure! , Ski Safari! , Tiny Wings! , Jet Car Stunts! , Badland! , Limbo! , Monument Valley! , Lara Croft Go! , Hitman Go! , Deus Ex Go! , The Room! , The Witness! , Myst! , Machinarium! , Samorost! , Botanicula! , World Of Goo! , Little Inferno! , Human Resource Machine! , Threes! , 2048! , Sudoku! , Tetris! , Pac-Man! , Space Invaders! , Galaga! , Asteroids! , Pong!</p>
48
- <p>As you can see, Buildbox can help you create any type of game you can imagine.</p>
49
- <p>Whether you want to make casual games, arcade games,</p> 0a6ba089eb<br />
50
- <br />
51
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ETKA V7.3 VW AUDI SKODA SEAT MULTI PC Francaisl UPD.md DELETED
@@ -1,175 +0,0 @@
1
- <br />
2
- <h1>ETKA V7.3 VW AUDI SKODA SEAT MULTI PC Francaisl: What is it and how to install it?</h1>
3
- <p>If you own a car from the Volkswagen Group, such as Audi, VW, Seat or Skoda, you may have heard of ETKA, the electronic catalogue that contains the full information on spare parts and accessories for these cars. In this article, we will explain what ETKA is, what are its features and benefits, and how to install and use ETKA V7.3 on your PC.</p>
4
- <h2>ETKA V7.3 VW AUDI SKODA SEAT MULTI PC Francaisl</h2><br /><p><b><b>Download</b> &middot;&middot;&middot; <a href="https://byltly.com/2uKvBv">https://byltly.com/2uKvBv</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p>ETKA is an acronym for Elektronischer Teilekatalog, which means electronic parts catalogue in German. It is a software that allows you to search for and order spare parts and accessories for your car based on its model, year, engine type, chassis number, etc. You can also view diagrams, illustrations, prices, availability, specifications and other useful information about each part or accessory.</p>
7
- <p>ETKA was first developed by LexCom GmbH in 1989 for Audi AG, and later expanded to cover other brands of the Volkswagen Group, such as VW, Seat and Skoda. Since then, ETKA has been updated regularly with new data and features, and has become an indispensable tool for car owners, mechanics, dealers and enthusiasts.</p>
8
- <p>Some of the features and benefits of using ETKA are:</p>
9
- <ul>
10
- <li>It covers all models of Audi, VW, Seat and Skoda from 1947 to present.</li>
11
- <li>It supports multiple languages, including French (Francaisl), English, German, Spanish, Italian, Portuguese, etc.</li>
12
- <li>It allows you to print or save parts lists, diagrams and other information in various formats.</li>
13
- <li>It helps you to identify the correct part or accessory for your car and avoid mistakes or compatibility issues.</li>
14
- <li>It helps you to compare prices and availability of parts from different suppliers or regions.</li>
15
- <li>It helps you to maintain your car in good condition and save money on repairs or replacements.</li>
16
- </ul>
17
- <p>To use ETKA on your PC, you need to meet the following system requirements:</p>
18
- <ul>
19
- <li>A Windows operating system (Windows XP/Vista/7/8/10)</li>
20
- <li>A DVD-ROM drive or a virtual drive software</li>
21
- <li>At least 4 GB of RAM</li>
22
- <li>At least 20 GB of free hard disk space</li>
23
- <li>An internet connection (optional but recommended)</li>
24
- </ul>
25
- <p>ETKA is compatible with most diagnostic tools and software that use OBD-II protocols, such as VCDS (VAG-COM), ODIS (Offboard Diagnostic Information System), etc.</p>
26
- <h2>How to install ETKA V7.3 on your PC?</h2>
27
- <p>To install ETKA V7.3 on your PC, you need to follow these steps:</p>
28
- <ol>
29
- <li>Download ETKA V7.3 from a reliable source (such as <a href="https://www.obd2france.com/wholesale/audi-vw-seat-skoda-etka-electronic-catalogue.html">this website</a>) or obtain a DVD copy from a dealer or a friend.</li>
30
- <li>Insert the DVD into your DVD-ROM drive or mount the ISO file using a virtual drive software (such as Daemon Tools).</li>
31
- <li>Run the setup.exe file from the DVD or the virtual drive and follow the instructions on the screen.</li>
32
- <li>Select the language you want to install (such as Francaisl) and accept the license agreement.</li>
33
- <li>Select the brands you want to install (such as Audi, VW, Seat and Skoda) and choose the installation path.</li>
34
- <li>Wait for the installation process to complete (it may take several minutes).</li>
35
- </ol>
36
- <p>To activate ETKA V7.3 on your PC, you need to follow these steps:</p>
37
- <ol>
38
- <li>Run the ETKA Loader.exe file from the installation folder (usually C:\ETKA).</li>
39
- <li>Select your country or region (such as France) and click OK.</li>
40
- <li>Enter your user name (such as Admin) and password (such as 12345) and click OK.</li>
41
- <li>Wait for the activation process to complete (it may take several seconds).</li>
42
- </ol>
43
- <p>To update ETKA V7.3 to the latest version on your PC, you need to follow these steps:</p>
44
- <p>ETKA V7.3 software for VW AUDI SKODA SEAT cars<br />
45
- How to install ETKA V7.3 on multiple PC in French<br />
46
- ETKA V7.3 electronic parts catalog for VW AUDI SKODA SEAT<br />
47
- ETKA V7.3 update download for VW AUDI SKODA SEAT<br />
48
- ETKA V7.3 price and features comparison for VW AUDI SKODA SEAT<br />
49
- ETKA V7.3 compatibility with Windows 10 for VW AUDI SKODA SEAT<br />
50
- ETKA V7.3 online access for VW AUDI SKODA SEAT<br />
51
- ETKA V7.3 crack and keygen for VW AUDI SKODA SEAT<br />
52
- ETKA V7.3 user manual and guide in French for VW AUDI SKODA SEAT<br />
53
- ETKA V7.3 reviews and ratings for VW AUDI SKODA SEAT<br />
54
- ETKA V7.3 alternatives and competitors for VW AUDI SKODA SEAT<br />
55
- ETKA V7.3 support and customer service for VW AUDI SKODA SEAT<br />
56
- ETKA V7.3 benefits and advantages for VW AUDI SKODA SEAT<br />
57
- ETKA V7.3 disadvantages and drawbacks for VW AUDI SKODA SEAT<br />
58
- ETKA V7.3 troubleshooting and error fixing for VW AUDI SKODA SEAT<br />
59
- ETKA V7.3 free trial and demo for VW AUDI SKODA SEAT<br />
60
- ETKA V7.3 best practices and tips for VW AUDI SKODA SEAT<br />
61
- ETKA V7.3 latest version and release date for VW AUDI SKODA SEAT<br />
62
- ETKA V7.3 FAQs and common questions for VW AUDI SKODA SEAT<br />
63
- ETKA V7.3 testimonials and case studies for VW AUDI SKODA SEAT<br />
64
- ETKA V7.3 coupons and discounts for VW AUDI SKODA SEAT<br />
65
- ETKA V7.3 warranty and refund policy for VW AUDI SKODA SEAT<br />
66
- ETKA V7.3 system requirements and specifications for VW AUDI SKODA SEAT<br />
67
- ETKA V7.3 training and certification for VW AUDI SKODA SEAT<br />
68
- ETKA V7.3 integration and customization for VW AUDI SKODA SEAT<br />
69
- ETKA V7.3 security and privacy features for VW AUDI SKODA SEAT<br />
70
- ETKA V7.3 backup and restore options for VW AUDI SKODA SEAT<br />
71
- ETKA V7.3 performance and speed optimization for VW AUDI SKODA SEAT<br />
72
- ETKA V7.3 forum and community for VW AUDI SKODA SEAT<br />
73
- ETKA V7.3 blog and news for VW AUDI SKODA SEAT<br />
74
- How to use ETKA V7.3 to find parts for VW AUDI SKODA SEAT<br />
75
- How to upgrade from ETKA 6 to ETKA 7.3 for VW AUDI SKODA SEAT<br />
76
- How to export data from ETKA 7.3 to Excel or PDF for VW AUDI SKODA SEAT<br />
77
- How to print labels from ETKA 7.3 for VW AUDI SKODA SEAT<br />
78
- How to change language in ETKA 7.3 from English to French for VW AUDI SKODA SEAT<br />
79
- How to add VIN or PR codes in ETKA 7.3 for VW AUDI SKODA SEAT<br />
80
- How to search by part number or description in ETK</p>
81
- <ol>
82
- <li>Connect your PC to the internet (if not already connected).</li>
83
- <li>Run the Update.exe file from the installation folder (usually C:\ETKA).</li>
84
- <li>Select your country or region (such as France) and click OK.</li>
85
- <li>Select the brands you want to update (such as Audi, VW, Seat and Skoda) and click OK.</li>
86
- <li>Wait for the update process to complete (it may take several minutes).</li>
87
- </ol>
88
- <p>To use ETKA V7.3 to search for spare parts and accessories for your car on your PC, you need to follow these steps:</p>
89
- <ol>
90
- <li>Run the ETKA Loader.exe file from the installation folder (usually C:\ETKA).</li>
91
- <li>Select your country or region (such as France) and click OK.</li>
92
- <li>Select your user name (such as Admin) and password (such as 12345) and click OK.</li>
93
- <li>Select the brand you want to search for (such as Audi) from the main menu.</li>
94
- <li>Select one of the search methods from the sub-menu:</li>
95
- <ul>
96
- <li><b>VIN Search:</b> Enter your car's VIN number (Vehicle Identification Number) in the box and click Search. This will display all the information about your car based on its VIN number.</li>
97
- <li><b>Type Search:</b> Select your car's model group (such as A4), model year (such as 2010), engine type (such as TDI), etc. from the drop-down menus and click Search. This will display all the information about your car based on its type.</li>
98
- <li><b>Favourites Search:</b> Select one of your favourite cars that you have saved previously from the list and click Search. This will display all the information about your favourite car based on its VIN number or type.</li>
99
- </ul>
100
- <li>Select one of the categories from the left panel (such as Engine) that you want to search for parts or accessories.</li>
101
- <li>Select one of the sub-categories from the right panel (such as Cylinder Head) that you want to search for parts or accessories.</li>
102
- <li>Select one of the items from the bottom panel (such as Valve Guide) that you want to search for parts or accessories.</li>
103
- <li>This will display a diagram of the item with its part number, name, price, availability, etc. You can zoom in or out of the diagram by using the + or - buttons at the top right corner.</li>
104
- <li>You can also view more details about each part by clicking on its part number or name in the diagram or in the table below it.</li>
105
- </ol>
106
- <h2>Conclusion</h2>
107
- <p>In this article, we have explained what ETKA is, what are its features and benefits, and how to install and use <h2>Conclusion</h2>
108
- <p>In this article, we have explained what ETKA is, what are its features and benefits, and how to install and use ETKA V7.3 on your PC. We hope that this article has helped you to understand and appreciate ETKA better, and that you will find it useful for your car maintenance and repair needs.</p>
109
- <p>Here are some tips and tricks for using ETKA V7.3 effectively:</p>
110
- <ul>
111
- <li>You can switch between different brands (such as Audi, VW, Seat and Skoda) by clicking on their logos at the top left corner of the main menu.</li>
112
- <li>You can switch between different languages (such as Francaisl, English, German, etc.) by clicking on the flag icon at the top right corner of the main menu.</li>
113
- <li>You can save your favourite cars by clicking on the star icon at the top right corner of the sub-menu. You can also edit or delete your favourites by right-clicking on them in the Favourites Search list.</li>
114
- <li>You can print or save parts lists, diagrams and other information by clicking on the printer or disk icons at the top right corner of the bottom panel. You can also copy or paste parts numbers or names by right-clicking on them in the diagram or in the table.</li>
115
- <li>You can search for parts by keywords by clicking on the magnifying glass icon at the top right corner of the left panel. You can also filter parts by criteria such as price range, availability, etc. by clicking on the funnel icon at the top right corner of the right panel.</li>
116
- </ul>
117
- <p>If you have any comments or questions about ETKA V7.3, please feel free to leave them below. We would love to hear from you and help you with any issues you may encounter.</p>
118
- <h2>FAQs</h2>
119
- <p>Here are some frequently asked questions and answers about ETKA V7.3:</p>
120
- <h3>What are some common problems and solutions when installing or using ETKA V7.3?</h3>
121
- <p>Some of the common problems and solutions when installing or using ETKA V7.3 are:</p>
122
- <ul>
123
- <li><b>Problem:</b> The installation process fails or freezes.</li>
124
- <li><b>Solution:</b> Make sure that your PC meets the system requirements and that you have enough free disk space. Also, make sure that you have disabled any antivirus or firewall software that may interfere with the installation process.</li>
125
- <li><b>Problem:</b> The activation process fails or gives an error message.</li>
126
- <li><b>Solution:</b> Make sure that you have entered the correct user name and password and that you have selected the correct country or region. Also, make sure that your PC is connected to the internet and that you have allowed ETKA Loader.exe to access it.</li>
127
- <li><b>Problem:</b> The update process fails or gives an error message.</li>
128
- <li><b>Solution:</b> Make sure that your PC is connected to the internet and that you have enough free disk space. Also, make sure that you have disabled any antivirus or firewall software that may interfere with the update process.</li>
129
- <li><b>Problem:</b> The search results are incomplete or incorrect.</li>
130
- <li><b>Solution:</b> Make sure that you have selected the correct brand, model, year, engine type, etc. for your car. Also, make sure that you have updated ETKA V7.3 to the latest version and that you have entered the correct part number or name in the search box.</li>
131
- </ul>
132
- <h3>How to switch between different languages in ETKA V7.3?</h3>
133
- <p>To switch between different languages in ETKA V7.3, you need to follow these steps:</p>
134
- <ol>
135
- <li>Run the ETKA Loader.exe file from the installation folder (usually C:\ETKA).</li>
136
- <li>Select your country or region (such as France) and click OK.</li>
137
- <li>Select your user name (such as Admin) and password (such as 12345) and click OK.</li>
138
- <li>Select the brand you want to search for (such as Audi) from the main menu.</li>
139
- <li>Click on the flag icon at the top right corner of the main menu.</li>
140
- <h3>How to switch between different languages in ETKA V7.3?</h3>
141
- <p>To switch between different languages in ETKA V7.3, you need to follow these steps:</p>
142
- <ol>
143
- <li>Run the ETKA Loader.exe file from the installation folder (usually C:\ETKA).</li>
144
- <li>Select your country or region (such as France) and click OK.</li>
145
- <li>Select your user name (such as Admin) and password (such as 12345) and click OK.</li>
146
- <li>Select the brand you want to search for (such as Audi) from the main menu.</li>
147
- <li>Click on the flag icon at the top right corner of the main menu.</li>
148
- <li>Select the language you want to switch to (such as English) from the drop-down menu.</li>
149
- <li>Wait for the language change to take effect (it may take a few seconds).</li>
150
- </ol>
151
- <h3>How to backup and restore ETKA V7.3 data?</h3>
152
- <p>To backup and restore ETKA V7.3 data, you need to follow these steps:</p>
153
- <ol>
154
- <li>To backup ETKA V7.3 data, copy the entire ETKA folder (usually C:\ETKA) to a safe location, such as an external hard drive or a USB flash drive.</li>
155
- <li>To restore ETKA V7.3 data, copy the backed up ETKA folder from the safe location to your PC, and overwrite the existing ETKA folder (usually C:\ETKA).</li>
156
- </ol>
157
- <h3>How to contact ETKA support or customer service?</h3>
158
- <p>To contact ETKA support or customer service, you can use one of these methods:</p>
159
- <ul>
160
- <li>Email: You can send an email to [email protected] with your name, country, phone number and problem description.</li>
161
- <li>Phone: You can call +49 89 189 31 31 0 from Monday to Friday, 8:00 am to 5:00 pm (Central European Time).</li>
162
- <li>Website: You can visit <a href="https://www.lexcom.de/en/">https://www.lexcom.de/en/</a> and fill out the contact form or use the live chat feature.</li>
163
- </ul>
164
- <h3>How to uninstall ETKA V7.3 from your PC?</h3>
165
- <p>To uninstall ETKA V7.3 from your PC, you need to follow these steps:</p>
166
- <ol>
167
- <li>Run the Uninstall.exe file from the installation folder (usually C:\ETKA).</li>
168
- <li>Select your country or region (such as France) and click OK.</li>
169
- <li>Select your user name (such as Admin) and password (such as 12345) and click OK.</li>
170
- <li>Select the brands you want to uninstall (such as Audi, VW, Seat and Skoda) and click OK.</li>
171
- <li>Wait for the uninstallation process to complete (it may take several minutes).</li>
172
- </ol>
173
- </p> 0a6ba089eb<br />
174
- <br />
175
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fxhome Photokey 6 Pro Mac Crack Torrent ((EXCLUSIVE)).md DELETED
@@ -1,194 +0,0 @@
1
-
2
- <h1>Fxhome Photokey 6 Pro Mac Crack Torrent: What You Need to Know</h1>
3
- <p>If you are looking for a powerful and easy-to-use photo editing software for Mac, you might have heard of <strong>Fxhome Photokey 6 Pro</strong>. This software allows you to remove green screen backgrounds from your photos and replace them with any image you want. You can also add effects, filters, layers, and text to your photos and create stunning images in minutes.</p>
4
- <h2>Fxhome Photokey 6 Pro Mac Crack Torrent</h2><br /><p><b><b>Download</b> &#10002; &#10002; &#10002; <a href="https://byltly.com/2uKxLm">https://byltly.com/2uKxLm</a></b></p><br /><br />
5
- <p>But what if you don't want to pay $299 for the software license fee? What if you want to access the full version of the software without any limitations? What if you want to use the software offline without internet connection?</p>
6
- <p>In that case, you might be tempted to download <strong>Fxhome Photokey 6 Pro as a crack torrent</strong>. A crack torrent is a file that contains the cracked version of a software, which means that it has <p>been modified to bypass the security and activation features of the original software. A torrent is a file that contains the metadata of a file or a group of files that can be downloaded from other users through a peer-to-peer network.</p>
7
- <p>Downloading Fxhome Photokey 6 Pro as a crack torrent might seem like a good idea, but is it really worth it? What are the advantages and disadvantages of doing so? How can you download Fxhome Photokey 6 Pro as a crack torrent safely and easily? Are there any alternatives to downloading Fxhome Photokey 6 Pro as a crack torrent?</p>
8
- <p>In this article, we will answer all these questions and more. We will give you a comprehensive guide on everything you need to know about Fxhome Photokey 6 Pro Mac crack torrent. We will also provide you with some alternative ways to get Fxhome Photokey 6 Pro for Mac without downloading it as a crack torrent. By the end of this article, you will be able to make an informed decision on whether to download Fxhome Photokey 6 Pro as a crack torrent or not.</p>
9
- <h2>What is Fxhome Photokey 6 Pro?</h2>
10
- <p>Fxhome Photokey 6 Pro is a photo editing software developed by Fxhome, a company that specializes in creating visual effects and video editing software. Fxhome Photokey 6 Pro is designed for Mac users who want to create professional-looking photos with green screen backgrounds.</p>
11
- <p>Green screen backgrounds are commonly used in photography and filmmaking to replace the background of a subject with another image. For example, you can take a photo of a person in front of a green screen and then replace the green screen with a scenic landscape, a city skyline, or any other image you want.</p>
12
- <p></p>
13
- <p>Fxhome Photokey 6 Pro makes this process easy and fast. You can import your photos with green screen backgrounds into the software and then choose from over 250 background images that are included in the software. You can also import your own background images or use online images from Flickr or Google Images. Fxhome Photokey 6 Pro will automatically remove the green screen from your photos and blend them with the background images seamlessly.</p>
14
- <p>But that's not all. Fxhome Photokey 6 Pro also allows you to add effects, filters, layers, and text to your photos. You can adjust the color, brightness, contrast, saturation, and sharpness of your photos. You can apply artistic effects such as blur, glow, vignette, and gradient. You can add layers such as shadows, reflections, borders, and watermarks. You can also add text to your photos using different fonts, colors, sizes, and styles.</p>
15
- <p>Fxhome Photokey 6 Pro is compatible with Mac OS X 10.6 or later. It supports various image formats such as JPEG, PNG, TIFF, BMP, and RAW. It also integrates with Adobe Photoshop and Lightroom, allowing you to export your photos directly to these software for further editing.</p>
16
- <p>Fxhome Photokey 6 Pro is ideal for photographers who want to create stunning photos with green screen backgrounds without spending too much time and money on expensive equipment and software. It is also suitable for hobbyists who want to have fun with their photos and unleash their creativity.</p>
17
- <h2>Why Download Fxhome Photokey 6 Pro as a Crack Torrent?</h2>
18
- <p>As mentioned earlier, Fxhome Photokey 6 Pro is not a free software. It costs $299 for the software license fee. This means that you have to pay this amount before you can use the software legally and fully.</p>
19
- <p>However, some people might not be willing or able to pay this amount for various reasons. They might think that the software is too expensive for their budget or needs. They might also think that they can get the same or better results with other free or cheaper software.</p>
20
- <p>Therefore, some people might resort to downloading Fxhome Photokey 6 Pro as a crack torrent instead. They might think that this is a smart way to save money and get access to the full version of the software without any limitations.</p>
21
- <p>But is downloading Fxhome Photokey 6 Pro as a crack torrent really a good idea? What are the pros and cons of doing so? Let's take a look at some of the advantages and disadvantages of downloading Fxhome Photokey 6 Pro as a crack torrent.</p>
22
- <h3>Advantages of Downloading Fxhome Photokey 6 Pro as a Crack Torrent</h3>
23
- <p>Some of the possible advantages of downloading Fxhome Photokey 6 Pro as a crack torrent are:</p>
24
- <ul>
25
- <li><strong>Saving money on the software license fee</strong>: This is probably the most obvious and common reason why people download Fxhome Photokey 6 Pro as a crack torrent. By doing so, they can avoid paying $299 for the software license fee and use the software for free.</li>
26
- <li><strong>Accessing the full version of the software without limitations</strong>: Another possible advantage of downloading Fxhome Photokey 6 Pro as a crack torrent is that they can access the full version of the software without any limitations. This means that they can use all the features and functions of the software without any restrictions or watermarks.</li>
27
- <li><strong>Being able to use the software offline without internet connection</strong>: A third possible advantage of downloading Fxhome Photokey 6 Pro as a crack torrent is that they can use the software offline without internet connection. This means that they can use the software anytime and anywhere without worrying about the internet availability or speed.</li>
28
- </ul>
29
- <h3>Disadvantages of Downloading Fxhome Photokey 6 Pro as a Crack Torrent</h3>
30
- <p>However, downloading Fxhome Photokey 6 Pro as a crack torrent also comes with some serious disadvantages. Some of them are:</p>
31
- <ul>
32
- <li><strong>Risking malware infection from untrusted sources</strong>: This is probably the most dangerous and common disadvantage of downloading Fxhome Photokey 6 Pro as a crack torrent. By downloading Fxhome Photokey 6 Pro as a crack torrent from untrusted sources, they are exposing their Mac computer to potential malware infection. Malware is malicious software that can harm their computer or steal their personal information. Some examples of malware are viruses, worms, trojans, spyware, adware, ransomware, and rootkits. Malware can cause various problems such as slowing down their computer, deleting or encrypting their files, displaying unwanted ads or pop-ups, redirecting their browser to malicious websites, stealing their passwords or credit card details, or even locking their computer and demanding a ransom to unlock it.</li>
33
- <li><strong>Violating the intellectual property rights of the software developer</strong>: Another serious disadvantage of downloading Fxhome Photokey 6 Pro as a crack torrent is that they are violating the intellectual property rights of the software developer. Intellectual property rights are the legal rights that protect the creations and inventions of individuals or organizations. By downloading Fxhome Photokey 6 Pro as a crack torrent, they are infringing on the copyright and trademark rights of Fxhome, the company that developed and owns Fxhome Photokey 6 Pro. This is not only unethical but also illegal.</li>
34
- <li><strong>Facing legal consequences for software piracy</strong>: A third serious disadvantage of downloading Fxhome Photokey 6 Pro as a crack torrent is that they are facing legal consequences for software piracy. Software piracy is the unauthorized copying, distribution, or use of software. By downloading Fxhome Photokey 6 Pro as a crack torrent, they are committing software piracy and breaking the law. Depending on the country and jurisdiction, software piracy can result in various penalties such as fines, imprisonment, or both.</li>
35
- <li><strong>Missing out on updates and technical support from the official website</strong>: A fourth disadvantage of downloading Fxhome Photokey 6 Pro as a crack torrent is that they are missing out on updates and technical support from the official website. By downloading Fxhome Photokey 6 Pro as a crack torrent, they are not eligible to receive any updates or technical support from Fxhome. This means that they will not be able to enjoy any new features or improvements that Fxhome might release for Fxhome Photokey 6 Pro in the future. It also means that they will not be able to get any help or assistance from Fxhome if they encounter any problems or issues with Fxhome Photokey 6 Pro.</li>
36
- </ul>
37
- <h2>How to Download Fxhome Photokey 6 Pro as a Crack Torrent?</h2>
38
- <p>If you still want to download Fxhome Photokey 6 Pro as a crack torrent despite knowing the disadvantages and risks involved, you will need to follow some steps to do so. Here is a step-by-step guide on how to download Fxhome Photokey 6 Pro as a crack torrent for Mac users:</p>
39
- <h3>Step 1: Find a Reliable Torrent Website</h3>
40
- <p>The first step to download Fxhome Photokey 6 Pro as a crack torrent is to find a reliable torrent website that offers Fxhome Photokey 6 Pro as a crack torrent. A torrent website is a website that hosts and indexes torrent files that can be downloaded by users through a peer-to-peer network.</p>
41
- <p>There are many torrent websites available on the internet, but not all of them are trustworthy or safe. Some of them might contain fake or malicious files that can harm your computer or steal your information. Some of them might also be blocked or banned by your internet service provider or government due to legal issues.</p>
42
- <p>Therefore Therefore, you need to be careful and selective when choosing a torrent website to download Fxhome Photokey 6 Pro as a crack torrent. You need to look for a torrent website that has a good reputation, a large user base, a high seed-to-leech ratio, and a positive feedback system. You also need to check the legality and safety of the torrent website in your country and region.</p>
43
- <p>Some examples of popular and reliable torrent websites that might offer Fxhome Photokey 6 Pro as a crack torrent are:</p>
44
- <ul>
45
- <li><a href="">The Pirate Bay</a>: This is one of the oldest and most famous torrent websites in the world. It has millions of users and hosts millions of torrent files in various categories such as movies, music, games, software, and more. It also has a simple and user-friendly interface that allows you to search and download torrent files easily.</li>
46
- <li><a href="">1337x</a>: This is another popular and well-known torrent website that offers a wide range of torrent files in various genres and languages. It has a modern and attractive design that makes it easy to navigate and find what you are looking for. It also has a community section where you can interact with other users and get recommendations and reviews.</li>
47
- <li><a href="">RARBG</a>: This is a torrent website that specializes in high-quality video content such as movies, TV shows, documentaries, and more. It has a loyal and active user base that ensures fast and reliable downloads. It also has a top 10 list that shows you the most popular and trending torrent files on the website.</li>
48
- </ul>
49
- <p>However, these are just examples and not endorsements. We do not recommend or encourage you to download Fxhome Photokey 6 Pro as a crack torrent from any torrent website. We are not responsible for any consequences or damages that might result from doing so.</p>
50
- <h3>Step 2: Download and Install a Torrent Client</h3>
51
- <p>The second step to download Fxhome Photokey 6 Pro as a crack torrent is to download and install a torrent client software that allows you to download and manage torrent files. A torrent client software is a program that connects you to other users who have the same torrent file as you and enables you to download the file from them.</p>
52
- <p>There are many torrent client software available on the internet, but not all of them are compatible or suitable for Mac users. Some of them might have compatibility issues, security vulnerabilities, or unwanted features such as ads or malware. Some of them might also be blocked or banned by your internet service provider or government due to legal issues.</p>
53
- <p>Therefore, you need to be careful and selective when choosing a torrent client software to download Fxhome Photokey 6 Pro as a crack torrent. You need to look for a torrent client software that has a good reputation, a large user base, a high performance, and a secure encryption. You also need to check the legality and safety of the torrent client software in your country and region.</p>
54
- <p>Some examples of popular and reliable torrent client software that are compatible with Mac users are:</p>
55
- <ul>
56
- <li><a href="">qBittorrent</a>: This is one of the best and most widely used torrent client software in the world. It is free, open-source, lightweight, and easy-to-use. It has all the essential features such as magnet links, DHT, PEX, encryption, bandwidth control, RSS feeds, and more. It also has no ads or malware.</li>
57
- <li><a href="">Transmission</a>: This is another excellent and popular torrent client software that is designed specifically for Mac users. It is also free, open-source, simple, and fast. It has all the basic features such as magnet links, DHT, PEX, encryption, bandwidth control, RSS feeds and more. It also has no ads or malware.</li>
58
- <li><a href="">uTorrent</a>: This is one of the oldest and most famous torrent client software in the world. It is also free, lightweight, and easy-to-use. It has all the advanced features such as magnet links, DHT, PEX, encryption, bandwidth control, RSS feeds, and more. However, it also has some ads and offers that might be annoying or unwanted.</li>
59
- </ul>
60
- <p>However, these are just examples and not endorsements. We do not recommend or encourage you to download Fxhome Photokey 6 Pro as a crack torrent using any torrent client software. We are not responsible for any consequences or damages that might result from doing so.</p>
61
- <h3>Step 3: Download Fxhome Photokey 6 Pro as a Crack Torrent</h3>
62
- <p>The third step to download Fxhome Photokey 6 Pro as a crack torrent is to download Fxhome Photokey 6 Pro as a crack torrent file from the torrent website using the torrent client software. A torrent file is a small file that contains the metadata of the file or files that you want to download. It does not contain the actual file or files, but it tells your torrent client software where to find them and how to download them.</p>
63
- <p>To download Fxhome Photokey 6 Pro as a crack torrent, you need to follow these steps:</p>
64
- <ol>
65
- <li>Open your torrent client software and go to the torrent website that you have chosen in step 1.</li>
66
- <li>Search for Fxhome Photokey 6 Pro as a crack torrent using the search bar or the categories on the website.</li>
67
- <li>Look for the Fxhome Photokey 6 Pro as a crack torrent file that has the most seeds and the least leeches. Seeds are users who have the complete file and are sharing it with others. Leeches are users who are downloading the file but not sharing it with others. The more seeds and the less leeches a torrent file has, the faster and more reliable your download will be.</li>
68
- <li>Click on the Fxhome Photokey 6 Pro as a crack torrent file that you have selected and download it to your computer. You can either click on the download button or the magnet link on the website. A magnet link is a link that directly opens your torrent client software and starts downloading the file without having to download the torrent file first.</li>
69
- <li>Wait for your torrent client software to download Fxhome Photokey 6 Pro as a crack torrent file from other users. You can monitor the progress and speed of your download on your torrent client software.</li>
70
- </ol>
71
- <p>Note: Downloading Fxhome Photokey 6 Pro as a crack torrent might take some time depending on your internet connection speed, the size of the file, and the number of seeds and leeches available. It might also consume a lot of your bandwidth and data usage, so make sure you have enough before you start downloading.</p>
72
- <h3>Step 4: Install Fxhome Photokey 6 Pro as a Crack Torrent</h3>
73
- <p>The fourth step to download Fxhome Photokey 6 Pro as a crack torrent is to install Fxhome Photokey 6 Pro as a crack torrent file on your Mac computer using the installation wizard. An installation wizard is a program that guides you through the process of installing a software on your computer.</p>
74
- <p>To install Fxhome Photokey 6 Pro as a crack torrent, you need to follow these steps:</p>
75
- <ol>
76
- <li>Open your torrent client software and go to the folder where you have downloaded Fxhome Photokey 6 Pro as a crack torrent file.</li>
77
- <li>Double-click on the Fxhome Photokey 6 Pro as a crack torrent file to open it. You will see a folder that contains several files such as setup.exe, crack.exe, readme.txt, etc.</li>
78
- <li>Double-click on the setup.exe file to launch the installation wizard. Follow the instructions on the screen to install Fxhome Photokey 6 Pro on your Mac computer.</li>
79
- <li>When prompted, enter the serial number or activation code that is provided in the readme.txt file or in another file in the folder. This will activate your Fxhome Photokey 6 Pro software and allow you to use it without any limitations.</li>
80
- <li>If required, copy and paste the crack.exe file or another file in the folder to the installation directory of Fxhome Photokey 6 Pro on your Mac computer. This will replace the original file and crack the software, which means that it will bypass the security and activation features of the software.</li>
81
- <li>Once the installation is complete, you can launch Fxhome Photokey 6 Pro from your Mac computer and start using it for your photo editing needs.</li>
82
- </ol>
83
- <p>Note: Installing Fxhome Photokey 6 Pro as a crack torrent might cause some problems or errors on your Mac computer. Some of them are:</p>
84
- <ul>
85
- <li>The software might not work properly or crash frequently.</li>
86
- <li>The software might contain malware or viruses that can harm your computer or steal your information.</li>
87
- <li>The software might be detected and blocked by your antivirus or firewall software.</li>
88
- <li>The software might be incompatible with your Mac OS version or other software on your computer.</li>
89
- </ul>
90
- <h3>Step 5: Enjoy Fxhome Photokey 6 Pro as a Crack Torrent</h3>
91
- <p>The fifth and final step to download Fxhome Photokey 6 Pro as a crack torrent is to enjoy Fxhome Photokey 6 Pro as a crack torrent on your Mac computer. You can use Fxhome Photokey 6 Pro as a crack torrent to create stunning photos with green screen backgrounds and add effects, filters, layers, and text to them. You can also export your photos to various formats or share them online with your friends and family.</p>
92
- <p>However, you should also be aware of the risks and consequences of using Fxhome Photokey 6 Pro as a crack torrent. You should also respect the intellectual property rights of Fxhome and consider buying the software from the official website if you like it and want to support the developer.</p>
93
- <h2>Alternatives to Downloading Fxhome Photokey 6 Pro as a Crack Torrent</h2>
94
- <p>If you are not comfortable or satisfied with downloading Fxhome Photokey 6 Pro as a crack torrent, you might be wondering if there are any alternatives to getting Fxhome Photokey 6 Pro for Mac without downloading it as a crack torrent. The answer is yes, there are some alternative ways to get Fxhome Photokey 6 Pro for Mac without downloading it as a crack torrent. Here are some of them:</p>
95
- <h3>Alternative 1: Buy Fxhome Photokey 6 Pro from the Official Website</h3>
96
- <p>The best and most recommended alternative to downloading Fxhome Photokey 6 Pro as a crack torrent is to buy Fxhome Photokey 6 Pro from the official website. By buying Fxhome Photokey 6 Pro from the official website, you will get several benefits such as:</p>
97
- <ul>
98
- <li><strong>Getting the latest and most updated version of the software</strong>: By buying Fxhome Photokey 6 Pro from the official website, you will get the latest and most updated version of the software that has all the new features and improvements that Fxhome has released for Fxhome Photokey 6 Pro.</li>
99
- <li><strong>Getting access to updates and technical support from Fxhome</strong>: By buying Fxhome Photokey 6 Pro from the official website, you will also get access to updates and technical support from Fxhome. This means that you will be able to download any updates that Fxhome might release for Fxhome Photokey 6 Pro in the future. It also means that you will be able to get help or assistance from Fxhome if you encounter any problems or issues with Fxhome Photokey 6 Pro.</li>
100
- <li><strong>Getting a legal and ethical way to use the software</strong>: By buying Fxhome Photokey 6 Pro from the official website, you will also get a legal and ethical way to use the software. This means that you will not be violating any intellectual property rights or laws by using Fxhome Photokey 6 Pro. It also means that you will be supporting the software developer and showing your appreciation for their work and effort.</li>
101
- </ul>
102
- <p>To buy Fxhome Photokey 6 Pro from the official website, you need to follow these steps:</p>
103
- <ol>
104
- <li>Go to the official website of Fxhome Photokey 6 Pro at <a href="">https://fxhome.com/photokey-6-pro</a>.</li>
105
- <li>Click on the "Buy now" button and choose your preferred payment method and currency.</li>
106
- <li>Enter your personal and billing information and complete the payment process.</li>
107
- <li>Check your email for the confirmation and receipt of your purchase. You will also receive a download link and a serial number for Fxhome Photokey 6 Pro.</li>
108
- <li>Download and install Fxhome Photokey 6 Pro on your Mac computer using the download link and the serial number that you have received.</li>
109
- <li>Enjoy using Fxhome Photokey 6 Pro legally and ethically on your Mac computer.</li>
110
- </ol>
111
- <p>Note: Buying Fxhome Photokey 6 Pro from the official website might cost you $299, but it is worth it considering the benefits and advantages that you will get. It is also a one-time payment that will give you lifetime access to Fxhome Photokey 6 Pro. You can also get a 30-day money-back guarantee if you are not satisfied with Fxhome Photokey 6 Pro.</p>
112
- <h3>Alternative 2: Download Fxhome Photokey 6 Pro Demo Version from the Official Website</h3>
113
- <p>If you are not sure whether to buy Fxhome Photokey 6 Pro from the official website or not, you can also try downloading Fxhome Photokey 6 Pro demo version from the official website. A demo version is a free version of a software that allows you to test and evaluate the software before buying it. It usually has some limitations or restrictions compared to the full version of the software.</p>
114
- <p>By downloading Fxhome Photokey 6 Pro demo version from the official website, you will get some benefits such as:</p>
115
- <ul>
116
- <li><strong>Getting a safe and secure way to try the software</strong>: By downloading Fxhome Photokey 6 Pro demo version from the official website, you will get a safe and secure way to try the software. You will not have to worry about malware or viruses that might harm your computer or steal your information. You will also not have to worry about legal issues or consequences that might result from downloading Fxhome Photokey 6 Pro as a crack torrent.</li>
117
- <li><strong>Getting access to some of the features and functions of the software</strong>: By downloading Fxhome Photokey 6 Pro demo version from the official website, you will also get access to some of the features and functions of the software. You will be able to use Fxhome Photokey 6 Pro for your photo editing needs and see how it works and performs. You will also be able to compare it with other photo editing software that you might have or want to use.</li>
118
- <li><strong>Getting a chance to decide whether to buy the software or not</strong>: By downloading Fxhome Photokey 6 Pro demo version from the official website, you will also get a chance to decide whether to buy the software or not. You will be able to see if Fxhome Photokey 6 Pro meets your expectations and requirements. You will also be able to see if Fxhome Photokey 6 Pro is worth your money and time.</li>
119
- </ul>
120
- <p>To download Fxhome Photokey 6 Pro demo version from the official website, you need to follow these steps:</p>
121
- <ol>
122
- <li>Go to the official website of Fxhome Photokey 6 Pro at <a href="">https://fxhome.com/photokey-6-pro</a>.</li>
123
- <li>Click on the "Download free trial" button and enter your email address.</li>
124
- <li>Check your email for the download link and instructions for Fxhome Photokey 6 Pro demo version.</li>
125
- <li>Download and install Fxhome Photokey 6 Pro demo version on your Mac computer using the download link and instructions that you have received.</li>
126
- <li>Enjoy using Fxhome Photokey 6 Pro demo version on your Mac computer for a limited time.</li>
127
- </ol>
128
- <p>Note: Downloading Fxhome Photokey 6 Pro demo version from the official website is free, but it has some limitations or restrictions compared to the full version of the software. Some of them are:</p>
129
- <ul>
130
- <li>The demo version will expire after 14 days of use.</li>
131
- <li>The demo version will watermark your photos with the Fxhome logo.</li>
132
- <li>The demo version will not allow you to export your photos to other formats or share them online.</li>
133
- </ul>
134
- <h3>Alternative 3: Use Other Free or Paid Photo Editing Software for Mac</h3>
135
- <p>If you are not interested in buying or trying Fxhome Photokey 6 Pro from the official website, you can also use other free or paid photo editing software for Mac that have similar or better features than Fxhome Photokey 6 Pro. There are many photo editing software available on the internet, but not all of them are compatible or suitable for Mac users. Some of them might have compatibility issues, security vulnerabilities, or unwanted features such as ads or malware. Some of them might also be blocked or banned by your internet service provider or government due to legal issues.</p>
136
- <p>Therefore, you need to be careful and selective when choosing a photo editing software for Mac that can replace Fxhome Photokey 6 Pro. You need to look for a photo editing software that has a good reputation, a large user base, a high performance, and a secure encryption. You also need to check the legality and safety of the photo editing software in your country and region.</p>
137
- <p>Some examples of popular and reliable photo editing software for Mac that can replace Fxhome Photokey 6 Pro are:</p>
138
- <ul>
139
- <li><a href="">GIMP</a>: This is one of the best and most widely used photo editing software in the world. It is free, open-source, powerful, and versatile. It has all the essential features such as layers, masks, filters, brushes, tools, and more. It also has some advanced features such as scripting, plugins, animation, and more. It is compatible with various image formats such as JPEG, PNG, TIFF, BMP, and RAW.</li>
140
- <li><a href="">Photoshop</a>: This is another excellent and popular photo editing software that is developed by Adobe, a company that specializes in creating creative and multimedia software. It is paid, professional, and comprehensive. It has all the features and functions that you can imagine for photo editing such as layers, masks, filters, brushes, tools, and more. It also has some unique features such as smart objects, content-aware fill, camera raw, and more. It is compatible with various image formats such as JPEG, PNG, TIFF, BMP, and RAW.</li>
141
- <li><a href="">Pixlr</a>: This is a photo editing software that is designed specifically for online use. It is free, web-based, and easy-to-use. It has all the basic features such as layers, filters, tools and more. It also has some advanced features such as AI cutout, background removal, and more. It is compatible with various image formats such as JPEG, PNG, TIFF, BMP, and RAW.</li>
142
- </ul>
143
- <p>However, these are just examples and not endorsements. We do not recommend or encourage you to use any photo editing software for Mac without doing your own research and comparison. We are not responsible for any consequences or damages that might result from doing so.</p>
144
- <h2>Conclusion</h2>
145
- <p>In conclusion, Fxhome Photokey 6 Pro is a photo editing software for Mac that allows you to remove green screen backgrounds from your photos and replace them with any image you want. You can also add effects, filters, layers, and text to your photos and create stunning images in minutes.</p>
146
- <p>However, Fxhome Photokey 6 Pro is not a free software. It costs $299 for the software license fee. If you don't want to pay this amount, you might be tempted to download Fxhome Photokey 6 Pro as a crack torrent. A crack torrent is a file that contains the cracked version of a software, which means that it has been modified to bypass the security and activation features of the original software.</p>
147
- <p>Downloading Fxhome Photokey 6 Pro as a crack torrent might seem like a good idea, but it also comes with some serious disadvantages and risks. Some of them are:</p>
148
- <ul>
149
- <li>Risking malware infection from untrusted sources</li>
150
- <li>Violating the intellectual property rights of the software developer</li>
151
- <li>Facing legal consequences for software piracy</li>
152
- <li>Missing out on updates and technical support from the official website</li>
153
- </ul>
154
- <p>Therefore, we do not recommend or encourage you to download Fxhome Photokey 6 Pro as a crack torrent. We suggest that you either buy Fxhome Photokey 6 Pro from the official website, download Fxhome Photokey 6 Pro demo version from the official website, or use other free or paid photo editing software for Mac that have similar or better features than Fxhome Photokey 6 Pro.</p>
155
- <p>We hope that this article has helped you understand everything you need to know about Fxhome Photokey 6 Pro Mac crack torrent. We also hope that you have made an informed decision on whether to download Fxhome Photokey 6 Pro as a crack torrent or not. Thank you for reading and have a great day!</p>
156
- <h2>FAQs</h2>
157
- <p>Here are some frequently asked questions about Fxhome Photokey 6 Pro Mac crack torrent:</p>
158
- <ol>
159
- <li><strong>What is Fxhome Photokey 6 Pro?</strong></li>
160
- <p>Fxhome Photokey 6 Pro is a photo editing software for Mac that allows you to remove green screen backgrounds from your photos and replace them with any image you want. You can also add effects, filters, layers, and text to your photos and create stunning images in minutes.</p>
161
- <li><strong>What is a crack torrent?</strong></li>
162
- <p>A crack torrent is a file that contains the cracked version of a software, which means that it has been modified to bypass the security and activation features of the original software.</p>
163
- <li><strong>What are the advantages and disadvantages of downloading Fxhome Photokey 6 Pro as a crack torrent?</strong></li>
164
- <p>Some of the possible advantages of downloading Fxhome Photokey 6 Pro as a crack torrent are:</p>
165
- <ul>
166
- <li>Saving money on the software license fee</li>
167
- <li>Accessing the full version of the software without limitations</li>
168
- <li>Being able to use the software offline without internet connection</li>
169
- </ul>
170
- <p>Some of the possible disadvantages of downloading Fxhome Photokey 6 Pro as a crack torrent are:</p>
171
- <ul>
172
- <li>Risking malware infection from untrusted sources</li>
173
- <li>Violating the intellectual property rights of the software developer</li>
174
- <li>Facing legal consequences for software piracy</li>
175
- <li>Missing out on updates and technical support from the official website</li>
176
- </ul>
177
- <li><strong>How to download Fxhome Photokey 6 Pro as a crack torrent?</strong></li>
178
- <p>To download Fxhome Photokey 6 Pro as a crack torrent, you need to follow these steps:</p>
179
- <ol>
180
- <li>Find a reliable torrent website that offers Fxhome Photokey 6 Pro as a crack torrent.</li>
181
- <li>Download and install a torrent client software that allows you to download and manage torrent files.</li>
182
- <li>Download Fxhome Photokey 6 Pro as a crack torrent file from the torrent website using the torrent client software.</li>
183
- <li>Install Fxhome Photokey 6 Pro as a crack torrent file on your Mac computer using the installation wizard.</li <li>Enjoy Fxhome Photokey 6 Pro as a crack torrent on your Mac computer.</li>
184
- </ol>
185
- <li><strong>What are the alternatives to downloading Fxhome Photokey 6 Pro as a crack torrent?</strong></li>
186
- <p>Some of the alternatives to downloading Fxhome Photokey 6 Pro as a crack torrent are:</p>
187
- <ul>
188
- <li>Buy Fxhome Photokey 6 Pro from the official website.</li>
189
- <li>Download Fxhome Photokey 6 Pro demo version from the official website.</li>
190
- <li>Use other free or paid photo editing software for Mac that have similar or better features than Fxhome Photokey 6 Pro.</li>
191
- </ul>
192
- </ol></p> b2dd77e56b<br />
193
- <br />
194
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Game Point Blank Strike di Laptop Tanpa Ribet.md DELETED
@@ -1,123 +0,0 @@
1
- <br />
2
- <h1>How to Download Game Point Blank Strike on Laptop</h1>
3
- <p>If you are a fan of first-person shooter (FPS) games, you might have heard of Point Blank Strike, a popular mobile game that brings the thrill and excitement of the original Point Blank to your phone. But did you know that you can also play Point Blank Strike on your laptop? In this article, we will show you how to download and install Point Blank Strike on your laptop using two different methods. We will also explain why playing Point Blank Strike on your laptop can enhance your gaming experience and give you some tips to optimize your gameplay. Let's get started!</p>
4
- <h2>What is Point Blank Strike?</h2>
5
- <p>Point Blank Strike is a mobile FPS game developed by NEXON Company, based on the classic online game Point Blank that was released in 2008. Point Blank Strike features fast-paced multiplayer battles, various game modes, realistic graphics, and a huge arsenal of weapons. You can join up to eight players in team deathmatch, demolition, clan match, AI battle, and more. You can also customize your character, upgrade your weapons, and earn rewards as you play.</p>
6
- <h2>download game point blank strike di laptop</h2><br /><p><b><b>Download File</b> &#10022;&#10022;&#10022; <a href="https://urlin.us/2uT33C">https://urlin.us/2uT33C</a></b></p><br /><br />
7
- <h3>A mobile FPS game based on the original Point Blank</h3>
8
- <p>Point Blank Strike is a faithful adaptation of the original Point Blank, which was one of the most popular online FPS games in the world. Point Blank Strike retains the same gameplay mechanics, maps, modes, and weapons as the original game, but with improved graphics and performance for mobile devices. You can enjoy the classic Point Blank maps like Crackdown, Red Rock, and Burning Hall, as well as new maps exclusive to Point Blank Strike. You can also experience the original Point Blank mode, which features a legendary firearm, Kriss S.V.</p>
9
- <h3>Features of Point Blank Strike</h3>
10
- <p>Point Blank Strike has many features that make it one of the best mobile FPS games available. Here are some of them:</p>
11
- <ul>
12
- <li><b>Furious real-time multiplayer battles:</b> You can challenge other players from around the world in 4v4 matches that last only a few minutes. You don't have to wait for long matchmaking or loading times. Just tap and play!</li>
13
- <li><b>A simple auto-firing targeting system:</b> You don't have to worry about complicated controls or aiming. The game automatically fires at your enemies when they are in your crosshair. You can focus on moving, dodging, and strategizing.</li>
14
- <li><b>Even low-spec devices can join the fray:</b> You don't need a high-end phone to play Point Blank Strike. The game runs smoothly on devices with at least 1GB RAM and 385-410mb of free space. The game also utilizes an always-on PvP network, so you need a stable internet connection to play.</li>
15
- <li><b>A rich variety of live PvP content:</b> You can choose from different game modes, each with its own objectives and rules. You can also join or create a clan and compete with other clans for glory and rewards. You can also participate in events and tournaments for more fun and prizes.</li>
16
- </ul>
17
- <h2>Why Play Point Blank Strike on Laptop?</h2>
18
- <p>While playing Point Blank Strike on your phone is convenient and enjoyable, playing it on your laptop can offer some advantages that can make your gaming experience even better. Here are some reasons why you might want to play Point Blank Strike on your laptop:</p>
19
- <h3>Advantages of playing on a bigger screen and with better controls</h3> <p>One of the main benefits of playing Point Blank Strike on your laptop is that you can enjoy the game on a bigger and clearer screen. This can help you see the details of the game better, such as the enemies, the weapons, and the environment. You can also immerse yourself more in the game's atmosphere and graphics. Playing on a bigger screen can also reduce eye strain and fatigue, especially if you play for long hours.</p>
20
- <p>Another advantage of playing Point Blank Strike on your laptop is that you can use better controls, such as a keyboard and a mouse. This can give you more accuracy and precision when aiming and shooting. You can also customize your key bindings and mouse sensitivity to suit your preferences. Playing with a keyboard and a mouse can also improve your reaction time and reflexes, as well as your comfort and convenience.</p>
21
- <h3>How to avoid battery and performance issues on mobile devices</h3>
22
- <p>Another reason why you might want to play Point Blank Strike on your laptop is that you can avoid some of the common problems that affect mobile devices, such as battery drain and performance issues. Playing Point Blank Strike on your phone can consume a lot of battery power, especially if you play with high settings and brightness. This can limit your playtime and force you to charge your phone frequently. Playing on your laptop, on the other hand, can save your phone's battery life and let you play for longer without interruptions.</p>
23
- <p>How to play Point Blank: Strike on PC with LDPlayer<br />
24
- Point Blank: Strike PC version download<br />
25
- Cara install Point Blank: Strike di laptop<br />
26
- Point Blank: Strike system requirements for PC<br />
27
- Point Blank: Strike APK download for laptop<br />
28
- Point Blank: Strike tips and tricks for PC players<br />
29
- Point Blank: Strike best settings for laptop<br />
30
- Point Blank: Strike gameplay review on PC<br />
31
- Point Blank: Strike cheat codes for laptop<br />
32
- Point Blank: Strike online multiplayer mode on PC<br />
33
- Point Blank: Strike offline installer for laptop<br />
34
- Point Blank: Strike latest update for PC<br />
35
- Point Blank: Strike mod menu for laptop<br />
36
- Point Blank: Strike graphics comparison between PC and mobile<br />
37
- Point Blank: Strike keyboard and mouse controls for laptop<br />
38
- Point Blank: Strike emulator for PC free download<br />
39
- Point Blank: Strike patch notes for laptop<br />
40
- Point Blank: Strike ranking system on PC<br />
41
- Point Blank: Strike hack tool for laptop<br />
42
- Point Blank: Strike skins and weapons for PC<br />
43
- Point Blank: Strike minimum RAM for laptop<br />
44
- Point Blank: Strike FPS booster for PC<br />
45
- Point Blank: Strike error fix for laptop<br />
46
- Point Blank: Strike clan war on PC<br />
47
- Point Blank: Strike support and feedback for laptop<br />
48
- Point Blank: Strike custom maps for PC<br />
49
- Point Blank: Strike manual patch download for laptop<br />
50
- Point Blank: Strike new features on PC<br />
51
- Point Blank: Strike VPN for laptop<br />
52
- Point Blank: Strike tournaments and events on PC<br />
53
- Point Blank: Strike full client download for laptop<br />
54
- Point Blank: Strike cross-platform compatibility with PC and mobile<br />
55
- Point Blank: Strike redeem codes for laptop<br />
56
- Point Blank: Strike community and forum on PC<br />
57
- Point Blank: Strike partial client download for laptop<br />
58
- Point Blank: Strike best emulator for PC<br />
59
- Point Blank: Strike legendary weapons and skins for laptop<br />
60
- Point Blank: Strike server status on PC<br />
61
- Point Blank: Strike bug report for laptop<br />
62
- Point Blank: Strike beginners guide on PC<br />
63
- Point Blank: Strike sound effects and music for laptop<br />
64
- Point Blank: Strike test server on PC<br />
65
- Point Blank: Strike video settings for laptop<br />
66
- Point Blank: Strike aimbot and wallhack for PC<br />
67
- Point Blank: Strike maintenance schedule for laptop<br />
68
- Point Blank: Strike classic maps and modes on PC<br />
69
- Point Blank: Strike account transfer from mobile to laptop<br />
70
- Point Blank: Strike official website and social media on PC</p>
71
- <p>Playing Point Blank Strike on your phone can also cause some performance issues, such as lag, stuttering, freezing, and crashing. This can happen if your phone is not powerful enough to run the game smoothly, or if you have too many apps running in the background. These issues can ruin your gameplay and frustrate you. Playing on your laptop, however, can prevent these issues, as laptops usually have better hardware and software than phones. You can also adjust the game's settings to match your laptop's specifications and ensure optimal performance.</p>
72
- <h2>How to Download and Install Point Blank Strike on Laptop?</h2>
73
- <p>Now that you know why playing Point Blank Strike on your laptop can be a good idea, you might be wondering how to do it. There are two main ways to download and install Point Blank Strike on your laptop: using an Android emulator or using a phone app. We will explain each option in detail below.</p>
74
- <h3>Option 1: Using an Android emulator like BlueStacks</h3>
75
- <p>An Android emulator is a software that allows you to run Android apps on your PC or laptop. One of the most popular and reliable Android emulators is BlueStacks, which has over 500 million users worldwide. BlueStacks can run Point Blank Strike smoothly and efficiently on your laptop, as well as other Android games and apps. Here are the steps to download and install BlueStacks and Point Blank Strike on your laptop:</p>
76
- <h4>Steps to download and install BlueStacks and Point Blank Strike</h4>
77
- <ol>
78
- <li><b>Download BlueStacks from its official website:</b> Go to <a href="">https://www.bluestacks.com/</a> and click on the "Download BlueStacks" button. The download will start automatically. You can also choose the version of BlueStacks that matches your operating system (Windows or Mac).</li>
79
- <li><b>Install BlueStacks on your laptop:</b> After the download is complete, open the installer file and follow the instructions on the screen. The installation process may take a few minutes, depending on your internet speed and laptop's performance.</li>
80
- <li><b>Launch BlueStacks and sign in with your Google account:</b> Once BlueStacks is installed, open it from your desktop or start menu. You will see a welcome screen where you need to sign in with your Google account. This is necessary to access the Google Play Store and download apps. If you don't have a Google account, you can create one for free.</li>
81
- <li><b>Search for Point Blank Strike in the Google Play Store:</b> After signing in with your Google account, you will see the home screen of BlueStacks, where you can access various features and settings. Click on the "Game Center" tab at the top and then click on the "Google Play" icon at the bottom right corner. This will open the Google Play Store app within BlueStacks. In the search bar, type "Point Blank Strike" and hit enter.</li>
82
- <li><b>Download and install Point Blank Strike:</b> You will see a list of results related to Point Blank Strike. Click on the one that says "Point Blank: Strike" by NEXON Company. This will open the app's page where you can see its description, screenshots, ratings, reviews, etc. Click on the "Install" button to start downloading and installing Point Blank Strike on BlueStacks.</ <li><b>Launch Point Blank Strike and enjoy:</b> After Point Blank Strike is installed, you can launch it from the home screen of BlueStacks or from the "My Games" tab. You will see the game's logo and loading screen. Wait for a few seconds until the game is ready. You can then log in with your NEXON account or play as a guest. You can also choose your region and language. You are now ready to play Point Blank Strike on your laptop!</li>
83
- </ol>
84
- <h4>Tips to optimize BlueStacks settings and game controls</h4>
85
- <p>To make sure that you have the best gaming experience with BlueStacks and Point Blank Strike, here are some tips to optimize your settings and controls:</p>
86
- <ul>
87
- <li><b>Adjust the graphics and performance settings:</b> You can change the graphics and performance settings of BlueStacks to match your laptop's specifications and preferences. To do this, click on the "Menu" icon at the top right corner of BlueStacks and then click on "Settings". You will see a window where you can adjust various options, such as resolution, frame rate, CPU cores, RAM, etc. You can also choose from different presets, such as low, medium, high, or custom. For Point Blank Strike, we recommend using high or custom settings for better graphics and performance.</li>
88
- <li><b>Customize the game controls:</b> You can customize the game controls of Point Blank Strike to suit your keyboard and mouse. To do this, launch Point Blank Strike and then click on the "Keyboard" icon at the bottom right corner of BlueStacks. You will see a window where you can drag and drop different keys to different functions, such as movement, shooting, aiming, reloading, etc. You can also change the sensitivity and transparency of the keys. You can also use the default controls or choose from different presets, such as MOBA, FPS, or WASD.</li>
89
- </ul>
90
- <h3>Option 2: Using a phone app like Your Phone</h3>
91
- <p>An alternative way to play Point Blank Strike on your laptop is to use a phone app like Your Phone. Your Phone is a Microsoft app that allows you to link your Android phone to your Windows 10 PC or laptop. With Your Phone, you can access your phone's apps, notifications, photos, messages, calls, and more on your PC or laptop. You can also use Your Phone to play Point Blank Strike on your PC or laptop by mirroring your phone's screen. Here are the steps to link your PC and your Android phone with Your Phone and play Point Blank Strike on your PC:</p>
92
- <h4>Steps to link your PC and your Android phone with Your Phone</h4>
93
- <ol>
94
- <li><b>Download Your Phone app on your PC and your phone:</b> To use Your Phone, you need to download the app on both your PC and your phone. To download it on your PC, go to <a href="">https://www.microsoft.com/en-us/p/your-phone/9nmpj99vjbwv</a> and click on the "Get" button. The app will be installed automatically on your PC. To download it on your phone, go to <a href="">https://play.google.com/store/apps/details?id=com.microsoft.appmanager&hl=en_US&gl=US</a> and click on the "Install" button. The app will be downloaded and installed on your phone.</li>
95
- <li><b>Launch Your Phone app on your PC and sign in with your Microsoft account:</b> After installing Your Phone app on your PC, open it from your desktop or start menu. You will see a welcome screen where you need to sign in with your Microsoft account. This is necessary to link your PC and your phone. If you don't have a Microsoft account, you can create one for free.</li>
96
- <li><b>Launch Your Phone app on your phone and sign in with the same Microsoft account:</b> After installing Your Phone app on your phone, open it from your app drawer or home screen. You will see a welcome screen where you need to sign in with the same Microsoft account that you used on your PC. This will establish a connection between your PC and your phone.</li>
97
- <li><b>Allow permissions and settings on both devices:</b> To use Your Phone app properly, you need to allow some permissions and settings on both devices. On your PC, you will see a window where you can choose what features you want to use with Your Phone app, such as apps, notifications, photos, messages, calls, etc. Select the ones that you want and click on "Continue". On your phone, you will see a series of prompts where you need to allow some permissions and settings for Your Phone app, such as access to contacts, storage, notifications, etc. Tap on "Allow" or "OK" for each prompt.</li> <li><b>Verify the link code on both devices:</b> The final step to link your PC and your phone with Your Phone app is to verify the link code on both devices. On your PC, you will see a window where you will see a six-digit code. On your phone, you will see a notification where you will see the same code. Make sure that the codes match and then tap on "Allow" on your phone and click on "Done" on your PC. You have now successfully linked your PC and your phone with Your Phone app.</li>
98
- </ol>
99
- <h4>Tips to access and play Point Blank Strike on your PC</h4>
100
- <p>To access and play Point Blank Strike on your PC using Your Phone app, here are some tips to follow:</p>
101
- <ul>
102
- <li><b>Open the Apps feature on Your Phone app on your PC:</b> To access your phone's apps on your PC, you need to open the Apps feature on Your Phone app on your PC. To do this, launch Your Phone app on your PC and then click on the "Apps" icon at the left sidebar. You will see a list of all the apps that are installed on your phone.</li>
103
- <li><b>Find and launch Point Blank Strike from the list of apps:</b> To find and launch Point Blank Strike from the list of apps, you can either scroll down until you see it or use the search bar at the top to type its name. Once you find it, click on it to open it. You will see a window where you will see your phone's screen mirrored on your PC. You can also pin Point Blank Strike to your taskbar or start menu for easier access.</li>
104
- <li><b>Play Point Blank Strike using your mouse and keyboard:</b> To play Point Blank Strike using your mouse and keyboard, you need to use the same controls as you would on your phone. You can use your mouse to move the cursor and click to tap. You can also use some keyboard shortcuts, such as Esc to go back, F11 to enter or exit full screen mode, Ctrl + mouse wheel to zoom in or out, etc. You can also use the toolbar at the right side of the window to access some functions, such as home, back, recent apps, screenshot, etc.</li>
105
- </ul>
106
- <h2>Conclusion</h2>
107
- <p>Point Blank Strike is a fun and exciting mobile FPS game that you can also play on your laptop using either an Android emulator like BlueStacks or a phone app like Your Phone. Both methods have their pros and cons, so you can choose the one that suits you best. Playing Point Blank Strike on your laptop can offer some advantages, such as a bigger screen, better controls, and improved performance. However, you also need to make sure that you have a stable internet connection and enough storage space on both devices. We hope that this article has helped you learn how to download and install Point Blank Strike on your laptop and enjoy it to the fullest.</p>
108
- <h2>FAQs</h2>
109
- <p>Here are some frequently asked questions about Point Blank Strike and playing it on laptop:</p>
110
- <ol>
111
- <li><b>Is Point Blank Strike free to play?</b><br>
112
- Yes, Point Blank Strike is free to play. You can download it from the Google Play Store without paying anything. However, the game also offers some in-app purchases, such as gems, gold, weapons, skins, etc., that you can buy with real money if you want.</li>
113
- <li><b>Is Point Blank Strike compatible with Windows 10?</b><br>
114
- Yes, Point Blank Strike is compatible with Windows 10. You can play it on your Windows 10 laptop using either an Android emulator like BlueStacks or a phone app like Your Phone.</li>
115
- <li><b>Can I play Point Blank Strike offline?</b><br>
116
- No, Point Blank Strike is an online game that requires an internet connection to play. You cannot play it offline.</li>
117
- <li><b>Can I play Point Blank Strike with my friends?</b><br>
118
- Yes, Point Blank Strike is a multiplayer game that allows you to play with your friends. You can invite them to join your clan or team up with them in different game modes. You can also chat with them in-game or use voice chat if you have a microphone.</li>
119
- <li><b>How can I update Point Blank Strike?</b><br>
120
- To update Point Blank Strike, you need to go to the Google Play Store app on either your phone or BlueStacks and check for updates. If there is an update available, you can download and install it from there. If you are using Your Phone app, you need to update Point Blank Strike on your phone first and then launch it on your PC.</li>
121
- </ol></p> 197e85843d<br />
122
- <br />
123
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/APK Mody Free Download MOD APK Games and Apps for Android.md DELETED
@@ -1,155 +0,0 @@
1
-
2
- <h1>What is APK Mody and Why You Should Use It</h1>
3
- <p>If you are an Android user who loves to play games and use apps on your device, you might have heard of APK Mody. But what is it exactly and why should you use it? In this article, we will answer these questions and more. We will also show you how to download and install APK Mody on your device, as well as some tips and tricks for using it effectively.</p>
4
- <h2>apk mody</h2><br /><p><b><b>DOWNLOAD</b> &#127775; <a href="https://jinyurl.com/2uNP3Y">https://jinyurl.com/2uNP3Y</a></b></p><br /><br />
5
- <p>APK Mody is a website that provides thousands of MOD APK, Premium APK, and Original APK files for free. You can easily search and download any app or game you want from this site. Whether you want to enjoy unlimited resources, unlocked features, or access to paid apps and games, APK Mody has it all. You can also find safe and fast downloads of original versions of apps and games if you prefer to use them without any modifications.</p>
6
- <p>To use APK Mody, you need to have an Android device that supports installing APK files. You can download the APK file from the website and install it on your device manually. Here are the steps to do so:</p>
7
- <ol>
8
- <li>Go to <a href="https://apkmody.io">https://apkmody.io</a> on your browser.</li>
9
- <li>Search for the app or game you want to download using the search button or browse the pre-designed categories.</li>
10
- <li>Select the app or game from the list of results and click on the download button.</li>
11
- <li>Wait for the download to finish and locate the APK file on your device storage.</li>
12
- <li>Tap on the APK file and follow the installation instructions on your screen.</li>
13
- <li>Enjoy your app or game!</li>
14
- </ol>
15
- <h2>Top Features of APK Mody</h2>
16
- <p>APK Mody offers a variety of features that make it one of the best sources for downloading apps and games for Android. Here are some of the top features that you can enjoy with APK Mody:</p>
17
- <p>apk mody download free games<br />
18
- apk mody modded apps for android<br />
19
- apk mody premium unlocked apps<br />
20
- apk mody best mod apk site<br />
21
- apk mody latest mod apk games<br />
22
- apk mody how to install mod apk<br />
23
- apk mody safe and secure downloads<br />
24
- apk mody unlimited money mod apk<br />
25
- apk mody action games mod apk<br />
26
- apk mody simulation games mod apk<br />
27
- apk mody adventure games mod apk<br />
28
- apk mody racing games mod apk<br />
29
- apk mody puzzle games mod apk<br />
30
- apk mody role-playing games mod apk<br />
31
- apk mody strategy games mod apk<br />
32
- apk mody casual games mod apk<br />
33
- apk mody arcade games mod apk<br />
34
- apk mody sports games mod apk<br />
35
- apk mody board games mod apk<br />
36
- apk mody card games mod apk<br />
37
- apk mody casino games mod apk<br />
38
- apk mody educational games mod apk<br />
39
- apk mody music games mod apk<br />
40
- apk mody trivia games mod apk<br />
41
- apk mody word games mod apk<br />
42
- apk mody social apps mod apk<br />
43
- apk mody communication apps mod apk<br />
44
- apk mody entertainment apps mod apk<br />
45
- apk mody productivity apps mod apk<br />
46
- apk mody tools apps mod apk<br />
47
- apk mody photography apps mod apk<br />
48
- apk mody video players apps mod apk<br />
49
- apk mody music and audio apps mod apk<br />
50
- apk mody personalization apps mod apk<br />
51
- apk mody lifestyle apps mod apk<br />
52
- apk mody health and fitness apps mod apk<br />
53
- apk mody travel and local apps mod apk<br />
54
- apk mody shopping apps mod apk<br />
55
- apk mody news and magazines apps mod apk</p>
56
- <h3>MOD APK</h3>
57
- <p>MOD APKs are modified versions of apps and games that have extra features that are not available in the original versions. For example, you can get unlimited coins, gems, lives, weapons, skins, etc. in your favorite games. You can also unlock premium features, remove ads, bypass restrictions, etc. in your favorite apps. With MOD APKs, you can enhance your gaming and app experience without spending any money.</p>
58
- <h3>Premium APK</h3>
59
- <p>Premium APKs are paid versions of apps and games that are normally not free to download from the official app stores. For example, you can get access to Spotify Premium, Netflix Premium, YouTube Premium, etc. without paying any subscription fees. You can also get access to premium games that have high-quality graphics, gameplay, and content. With Premium APKs, you can enjoy the best of the best apps and games without breaking the bank.</p>
60
- <h3>Original APK</h3>
61
- <p>Original APKs are the same versions of apps and games that you can find on the official app stores such as Google Play Store or Amazon Appstore. They are not modified or altered in any way. You can use them if you want to download apps and games that are not available in your region, or if you want to avoid any potential risks or compatibility issues that may come with MOD or Premium APKs. With Original APKs, you can get safe and fast downloads of apps and games that are verified and updated regularly.</p>
62
- <h2>How to Use APK Mody Effectively</h2>
63
- <p>Now that you know what APK Mody is and what it offers, you might be wondering how to use it effectively. Here are some tips and tricks that will help you get the most out of APK Mody:</p>
64
- <h3>Search for your desired app or game using the search button or browse the categories</h3>
65
- <p>APK Mody has a user-friendly interface that allows you to easily find the app or game you want. You can use the search button on the top right corner of the website and type in the name of the app or game. You can also browse the categories on the homepage, such as Action, Adventure, Arcade, Puzzle, Racing, etc. You can also filter the results by popularity, rating, date, etc.</p>
66
- <h3>Read the description, reviews, and installation instructions carefully</h3>
67
- <p>Before you download any app or game from APK Mody, make sure you read the description, reviews, and installation instructions carefully. The description will give you an overview of what the app or game is about, what features it has, and what requirements it needs. The reviews will give you feedback from other users who have tried the app or game. The installation instructions will guide you through the steps to install the app or game on your device. Reading these information will help you avoid any problems or errors that may occur during or after the installation.</p>
68
- <h3>Download and install the APK file on your device</h3>
69
- <p>Once you have chosen the app or game you want to download, click on the download button and wait for the download to finish. You will find the APK file on your device storage, usually in the Downloads folder. Tap on the APK file and follow the installation instructions on your screen. Depending on the type of APK file, you may need to enable unknown sources in your device settings before installing it. This will allow you to install apps and games from sources other than the official app stores.</p>
70
- <h2>Tips and Tricks for APK Mody Users</h2>
71
- <p>To make your experience with APK Mody even better, here are some more tips and tricks that you should know:</p>
72
- <h3>Enable unknown sources in your device settings before installing APK files</h3>
73
- <p>As mentioned above, some APK files may require you to enable unknown sources in your device settings before installing them. This is because they are not from the official app stores and may not be verified by Google or Amazon. To enable unknown sources, go to your device settings, then security, then unknown sources. Toggle the switch to allow installation of apps from unknown sources. You can also disable it after installing the app or game if you want.</p>
74
- <h3>Check for updates regularly to get the latest versions of apps and games</h3>
75
- <p>APK Mody updates its apps and games regularly to provide you with the latest versions and features. To check for updates, go to your device settings, then apps, then select the app or game you want to update. Tap on the menu button on the top right corner and choose check for updates. If there is an update available, tap on it and download it. You can also enable automatic updates in your device settings if you want.</p>
76
- <h3>Backup your data before using MOD APKs to avoid losing progress or data</h3>
77
- <p>MOD APKs are great for enhancing your gaming and app experience, but they may also have some risks. For example, some MOD APKs may not be compatible with your device or may cause errors or crashes. Some MOD APKs may also overwrite your data or progress in the original version of the app or game. To avoid losing your data or progress, make sure you backup your data before using MOD APKs. You can use cloud services such as Google Drive or Dropbox to backup your data online. You can also use external storage devices such as SD cards or USB drives to backup your data offline.</p>
78
- <h2>Pros and Cons of APK Mody</h2>
79
- <p>APK Mody is a great website for downloading apps and games for Android, but it also has some pros and cons that you should be aware of. Here are some of them:</p>
80
- <h3>Pros</h3>
81
- <ul>
82
- <li>Free: You can download and use any app or game from APK Mody without paying any fees or charges.</li>
83
- <li>Easy: You can download and install any app or game from APK Mody with just a few clicks and taps.</li>
84
- <li>Diverse: You can find a wide range of apps and games from different genres and categories on APK Mody.</li>
85
- <li>Secure: You can download and install any app or game from APK Mody without worrying about viruses, malware, or spyware.</li>
86
- </ul>
87
- <h3>Cons</h3>
88
- <ul>
89
- <li>Potential risks: Some apps and games from APK Mody may not be compatible with your device or may cause errors or crashes. Some apps and games may also have bugs or glitches that may affect your experience.</li>
90
- <li>Compatibility issues: Some apps and games from APK Mody may not work well with your device's operating system, hardware, or software. Some apps and games may also require root access or additional permissions that may compromise your device's security or performance.</li>
91
- <li>Legal concerns: Some apps and games from APK Mody may violate the intellectual property rights of the original developers or publishers. Downloading and using such apps and games may be illegal in some countries or regions.</li>
92
- </ul>
93
- <h2>Alternatives to APK Mody</h2>
94
- <p>If you are looking for alternatives to APK Mody, there are other popular APK download sites that you can try. Here are some of them:</p>
95
- <h3>APKPure</h3>
96
- <p>APKPure is a website that provides original and pure APK files for Android users. You can download and install any app or game from APKPure without any modifications or changes. You can also find region-locked, pre-registered, and beta versions of apps and games on APKPure. APKPure has a simple and clean interface that makes it easy to use.</p>
97
- <h4>Advantages of APKPure</h4>
98
- <ul>
99
- <li>Original and pure: You can download and install the original versions of apps and games without any modifications or changes.</li>
100
- <li>Region-locked, pre-registered, and beta: You can access apps and games that are not available in your region, that require pre-registration, or that are in beta testing.</li>
101
- <li>Simple and clean: You can enjoy a simple and clean interface that makes it easy to use.</li>
102
- </ul>
103
- <h4>Disadvantages of APKPure</h4>
104
- <ul>
105
- <li>No MOD or Premium: You cannot download or install any MOD or Premium versions of apps and games from APKPure.</li>
106
- <li>Limited selection: You may not find some apps or games that are available on other sites on APKPure.</li>
107
- <li>Potential risks: You may still encounter some potential risks such as viruses, malware, spyware, etc. when downloading or installing apps or games from APKPure.</li>
108
- </ul>
109
- <h3>Aptoide</h3>
110
- <p>Aptoide is a website that provides an alternative app store for Android users. You can download and install any app or game from Aptoide without using the official app stores such as Google Play Store or Amazon Appstore. You can also create your own app store on Aptoide and share it with other users. Aptoide has a social and community-based interface that makes it fun to use.</p>
111
- <h4>Advantages of Aptoide</h4>
112
- <ul>
113
- <li>Alternative app store: You can download and install any app or game from Aptoide without using the official app stores.</li>
114
- <li>Create your own app store: You can create your own app store on Aptoide and share it with other users.</li>
115
- <li>Social and community-based: You can enjoy a social and community-based interface that makes it fun to use.</li>
116
- </ul>
117
- <h4>Disadvantages of Aptoide</h4>
118
- <ul>
119
- <li>Potential risks: You may encounter some potential risks such as viruses, malware, spyware, etc. when downloading or installing apps or games from Aptoide.</li>
120
- <li>Quality issues: You may find some apps or games that have low quality, poor performance, or bad reviews on Aptoide.</li>
121
- <li>Legal concerns: You may violate the intellectual property rights of the original developers or publishers when downloading or using some apps or games from Aptoide.</li>
122
- </ul>
123
- <h3>HappyMod</h3>
124
- <p>HappyMod is a website that provides MOD APK files for Android users. You can download and install any MOD version of any app or game from HappyMod. You can also find original versions of some apps and games on HappyMod. HappyMod has a colorful and lively interface that makes it attractive to use.</p>
125
- <h4>Advantages of HappyMod</h4>
126
- <ul>
127
- <li>MOD <li>MOD APK: You can download and install any MOD version of any app or game from HappyMod. You can enjoy unlimited resources, unlocked features, and more with MOD APKs.</li>
128
- <li>Original APK: You can also find original versions of some apps and games on HappyMod. You can use them if you want to avoid any potential risks or compatibility issues with MOD APKs.</li>
129
- <li>Colorful and lively: You can enjoy a colorful and lively interface that makes it attractive to use.</li>
130
- </ul>
131
- <h4>Disadvantages of HappyMod</h4>
132
- <ul>
133
- <li>Potential risks: You may encounter some potential risks such as viruses, malware, spyware, etc. when downloading or installing apps or games from HappyMod.</li>
134
- <li>Compatibility issues: You may face some compatibility issues with your device or operating system when using some apps or games from HappyMod.</li>
135
- <li>Legal concerns: You may violate the intellectual property rights of the original developers or publishers when downloading or using some apps or games from HappyMod.</li>
136
- </ul>
137
- <h2>Conclusion and Recommendations</h2>
138
- <p>In conclusion, APK Mody is a website that provides thousands of MOD APK, Premium APK, and Original APK files for free. You can easily search and download any app or game you want from this site. Whether you want to enjoy unlimited resources, unlocked features, or access to paid apps and games, APK Mody has it all. You can also find safe and fast downloads of original versions of apps and games if you prefer to use them without any modifications.</p>
139
- <p>However, APK Mody also has some pros and cons that you should be aware of. Some of the pros are that it is free, easy, diverse, and secure. Some of the cons are that it may have potential risks, compatibility issues, and legal concerns. Therefore, you should use APK Mody with caution and discretion. You should also backup your data before using MOD APKs to avoid losing progress or data.</p>
140
- <p>If you are looking for alternatives to APK Mody, you can try other popular APK download sites such as APKPure, Aptoide, and HappyMod. Each site has its own features, advantages, and disadvantages that you should compare and contrast before choosing one. You should also read the description, reviews, and installation instructions carefully before downloading or installing any app or game from any site.</p>
141
- <p>We hope this article has helped you understand what APK Mody is and how to use it effectively. We also hope you have learned some tips and tricks for using APK Mody or its alternatives. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
142
- <h2>FAQs</h2>
143
- <p>Here are some frequently asked questions about APK Mody:</p>
144
- <h3>What is the difference between MOD APK and Premium APK?</h3>
145
- <p>MOD APKs are modified versions of apps and games that have extra features that are not available in the original versions. Premium APKs are paid versions of apps and games that are normally not free to download from the official app stores.</p>
146
- <h3>Is APK Mody safe to use?</h3>
147
- <p>APK Mody is generally safe to use, as it provides verified and updated apps and games. However, there may still be some potential risks such as viruses, malware, spyware, etc. when downloading or installing apps or games from this site. Therefore, you should always scan the APK files before installing them on your device.</p>
148
- <h3>Is APK Mody legal to use?</h3>
149
- <p>APK Mody may not be legal to use in some countries or regions, as it may violate the intellectual property rights of the original developers or publishers of the apps and games. Downloading and using such apps and games may be illegal in some cases. Therefore, you should check the laws and regulations of your country or region before using this site.</p>
150
- <h3>How do I update the apps and games from APK Mody?</h3>
151
- <p>You can update the apps and games from APK Mody by checking for updates regularly on your device settings or on the website. You can also enable automatic updates in your device settings if you want.</p>
152
- <h3>How do I uninstall the apps and games from APK Mody?</h3>
153
- <p>You can uninstall the apps and games from APK Mody by going to your device settings, then apps, then selecting the app or game you want to uninstall. Tap on the uninstall button and confirm your action.</p> 401be4b1e0<br />
154
- <br />
155
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Barbie Influencer Makeup and Dress Up Games for TikTok Fans.md DELETED
@@ -1,74 +0,0 @@
1
-
2
- <h1>Barbie Makeup and Dress Up Games: A Fun and Creative Way to Express Yourself</h1>
3
- <p>If you love fashion, beauty, and creativity, you will love playing barbie makeup and dress up games. These games let you transform your favorite barbie characters into stunning models, princesses, celebrities, or anything you can imagine. You can choose from a variety of makeup products, accessories, hairstyles, and outfits to create your own unique look. Whether you want to play online or offline, there are plenty of options for you to enjoy. In this article, we will tell you everything you need to know about barbie makeup and dress up games, including what they are, how to play them, and where to find them.</p>
4
- <h2>What are Barbie Makeup and Dress Up Games?</h2>
5
- <p>Barbie makeup and dress up games are games that allow you to customize the appearance of barbie dolls or characters using different tools and items. You can change their skin tone, eye color, hair color, hair style, makeup, jewelry, glasses, hats, scarves, bags, shoes, dresses, skirts, pants, tops, jackets, coats, and more. You can also choose from different themes and scenarios, such as weddings, parties, holidays, seasons, careers, hobbies, sports, fantasy, fairy tales, etc. The possibilities are endless!</p>
6
- <h2>barbie makeup and dress up games</h2><br /><p><b><b>Download Zip</b> > <a href="https://jinyurl.com/2uNUGC">https://jinyurl.com/2uNUGC</a></b></p><br /><br />
7
- <h3>The History of Barbie and Her Games</h3>
8
- <p>Barbie is one of the most popular and iconic dolls in the world. She was created by Ruth Handler in 1959 as a way to inspire young girls to pursue their dreams and aspirations. Since then, she has evolved into a global phenomenon with over 200 careers, 150 countries represented, and millions of fans. She has also inspired countless movies, TV shows, books, comics, video games, apps, websites, toys, and merchandise.</p>
9
- <p>One of the earliest forms of barbie games was the paper doll. Paper dolls are cut-out figures that can be dressed up with different outfits that are attached with tabs. Paper dolls were popular in the 19th and 20th centuries as a cheap and easy way to entertain children. They were also used as educational tools to teach about different cultures and customs. Barbie paper dolls were first introduced in 1964 and featured various outfits and accessories for different occasions.</p>
10
- <p>Another form of barbie games was the board game. Board games are games that involve moving pieces on a board according to a set of rules. Board games can be played by one or more players and can have different objectives and themes. Barbie board games were first introduced in 1961 and featured different challenges and adventures for barbie and her friends. Some examples of barbie board games are Barbie Queen of the Prom (1961), Barbie Dream Date (1982), Barbie Fashion Show (1996), etc.</p>
11
- <p>The most modern form of barbie games is the video game. Video games are games that involve interacting with a computer or a console using a controller or a keyboard. Video games can have different genres and modes such as action, adventure, puzzle, simulation, role-playing, etc. Barbie video games were first introduced in 1984 and featured different activities and stories for barbie and her friends. Some examples of barbie video games are Barbie Fashion Designer (1996), Barbie Horse Adventures (2003), Barbie Dreamhouse Party (2013), etc.</p>
12
- <h3>The Benefits of Playing Barbie Makeup and Dress Up Games</h3>
13
- <p>Playing barbie makeup and dress up games can have many benefits for your cognitive, emotional, social, and creative development. Here are some of them: - Playing barbie makeup and dress up games can improve your memory and concentration. You have to remember the different options and combinations that you have used or seen, and pay attention to the details and colors that match your style and theme. This can help you enhance your cognitive skills and mental focus. - Playing barbie makeup and dress up games can boost your self-esteem and confidence. You can express yourself freely and creatively, and experiment with different looks and styles that suit your personality and mood. You can also see how beautiful and fabulous you can be, and appreciate your own beauty and uniqueness. - Playing barbie makeup and dress up games can foster your social skills and communication. You can play with your friends or family, and share your ideas and opinions with them. You can also learn from their feedback and suggestions, and respect their preferences and tastes. You can also play online with other players from around the world, and make new friends and connections. - Playing barbie makeup and dress up games can stimulate your imagination and creativity. You can create your own stories and scenarios, and design your own characters and outfits. You can also mix and match different elements, and explore different possibilities and outcomes. You can also use your artistic sense and flair, and have fun with colors, shapes, patterns, textures, etc. <h3>The Types of Barbie Makeup and Dress Up Games</h3>
14
- <p>There are many types of barbie makeup and dress up games that you can choose from, depending on your interests and preferences. Here are some of the most popular ones: - Barbie Fashion Games: These games let you create fashionable outfits for barbie and her friends, using different clothes, shoes, accessories, etc. You can also choose from different themes such as casual, formal, sporty, etc. Some examples of barbie fashion games are Barbie Fashionista (2010), Barbie Fashion Closet (2017), Barbie Magical Fashion (2018), etc. - Barbie Makeover Games: These games let you give barbie a makeover, using different makeup products, hairstyles, skin treatments, etc. You can also choose from different themes such as glam, natural, bridal, etc. Some examples of barbie makeover games are Barbie Real Makeover (2014), Barbie Beauty Bath (2016), Barbie Glam Makeover (2019), etc. - Barbie Princess Games: These games let you dress up barbie as a princess, using different dresses, crowns, jewels, etc. You can also choose from different themes such as fairy tale, fantasy, modern, etc. Some examples of barbie princess games are Barbie Princess Dress Up (2012), Barbie Princess Adventure (2020), Barbie Princess Style (2021), etc. - Barbie Celebrity Games: These games let you dress up barbie as a celebrity, using different outfits, accessories, hairstyles, etc. You can also choose from different themes such as pop star, movie star, influencer, etc. Some examples of barbie celebrity games are Barbie Popstar Style (2015), Barbie Celebrity Style (2018), Barbie Hollywood Star (2020), etc.</p>
15
- <h2>How to Play Barbie Makeup and Dress Up Games?</h2>
16
- <p>Playing barbie makeup and dress up games is easy and fun. Here are the basic steps that you need to follow: <h3>Choose Your Favorite Barbie Character</h3>
17
- <p>The first step is to choose which barbie character you want to play with. You can choose from the classic barbie doll or one of her many friends such as Teresa, Nikki, Ken, Skipper, Chelsea, etc. You can also choose from different versions of barbie such as Dreamhouse Adventures, Princess Adventure, Life in the Dreamhouse, etc. You can also create your own custom barbie character by choosing her name, skin tone, eye color, hair color, hair style, etc.</p>
18
- <p>barbie fashion and beauty games<br />
19
- barbie princess makeover games<br />
20
- barbie doll house and salon games<br />
21
- barbie cooking and baking games<br />
22
- barbie dreamhouse adventures games<br />
23
- barbie mermaid and fairy games<br />
24
- barbie wedding and party games<br />
25
- barbie spa and nail games<br />
26
- barbie hair and accessories games<br />
27
- barbie tiktok and influencer games<br />
28
- barbie dress up and makeup online<br />
29
- barbie makeover games for girls<br />
30
- barbie fashion show and design games<br />
31
- barbie shopping and style games<br />
32
- barbie celebrity and movie star games<br />
33
- barbie magic and fantasy games<br />
34
- barbie beach and pool games<br />
35
- barbie camping and outdoor games<br />
36
- barbie dance and music games<br />
37
- barbie school and career games<br />
38
- play free barbie makeup and dress up games<br />
39
- best barbie makeup and dress up games 2023<br />
40
- new barbie makeup and dress up games 2023<br />
41
- fun barbie makeup and dress up games for kids<br />
42
- cool barbie makeup and dress up games for teens<br />
43
- cute barbie makeup and dress up games for girls<br />
44
- realistic barbie makeup and dress up games online<br />
45
- 3d barbie makeup and dress up games online<br />
46
- download barbie makeup and dress up games for pc<br />
47
- install barbie makeup and dress up games for android<br />
48
- crazygames.com - play free online barbie makeup and dress up games<br />
49
- play.google.com - download free app for barbie dreamhouse adventures game<br />
50
- youtube.com - watch videos of barbie makeup and dress up game tutorials<br />
51
- pinterest.com - find ideas for barbie makeup and dress up game outfits<br />
52
- instagram.com - follow accounts of barbie makeup and dress up game fans<br />
53
- facebook.com - join groups of barbie makeup and dress up game lovers<br />
54
- reddit.com - discuss tips and tricks for barbie makeup and dress up game challenges<br />
55
- amazon.com - buy products related to barbie makeup and dress up game accessories<br />
56
- ebay.com - sell items related to barbie makeup and dress up game collections<br />
57
- walmart.com - shop deals on barbie makeup and dress up game toys</p>
58
- <h3>Select Your Style and Theme</h3>
59
- <p>The next step is to select the style and theme that you want to play with. You can choose from different categories such as fashion, makeover, princess, celebrity, etc. You can also choose from different subcategories such as casual, formal, sporty, glam, natural, bridal, fairy tale, fantasy, modern, pop star, movie star, influencer, etc. You can also mix and match different styles and themes to create your own unique combination.</p>
60
- <h3>Apply Makeup and Accessories</h3>
61
- <p>The third step is to apply makeup and accessories to your barbie character. You can choose from different makeup products such as foundation, concealer, powder, blush, bronzer, highlighter, eyeshadow, eyeliner, mascara, eyebrow pencil, lipstick, lip gloss, etc. You can also choose from different accessories such as earrings, necklaces, bracelets, rings, watches, glasses, hats, scarves, bags, shoes, etc. You can also adjust the size, color, and position of the makeup and accessories to suit your preference.</p>
62
- <h3>Dress Up Barbie in Fabulous Outfits</h3>
63
- <p>The final step is to dress up barbie in fabulous outfits. You can choose from different clothes such as dresses, skirts, pants, tops, jackets, coats, etc. You can also choose from different patterns, textures, colors, and styles of the clothes. You can also layer and combine different clothes to create your own unique look.</p>
64
- <h2>Where to Find the Best Barbie Makeup and Dress Up Games?</h2>
65
- <p>There are many places where you can find the best barbie makeup and dress up games. Here are some of the most popular ones: <h3>Online Websites and Apps</h3>
66
- <p>One of the easiest and most convenient ways to play barbie makeup and dress up games is online. There are many websites and apps that offer a wide range of barbie games that you can play for free or for a small fee. You can access these websites and apps using your computer, tablet, smartphone, or any other device that has an internet connection. Some of the advantages of playing online are that you can play anytime, anywhere, and with anyone. You can also save your progress and share your creations with others. Here are some of the best online websites and apps for barbie makeup and dress up games: - CrazyGames.com: This is one of the most popular websites for online games. It has over 10,000 games in various genres and categories, including barbie games. You can find hundreds of barbie makeup and dress up games on this website, such as Barbie Fashionista Challenge (2021), Barbie Winter Glam (2019), Barbie Date Crashing (2018), etc. You can play these games for free without downloading or registering. - Google Play Store: This is one of the most popular platforms for downloading apps for Android devices. It has over 3 million apps in various categories and genres, including barbie games. You can find hundreds of barbie makeup and dress up games on this platform, such as Barbie Dreamhouse Adventures (2018), Barbie Magical Fashion (2018), Barbie Fashion Closet (2017), etc. You can download these apps for free or for a small fee, and enjoy them on your device.</p>
67
- <h3>Offline Toys and Books</h3>
68
- <p>Another way to play barbie makeup and dress up games is offline. There are many toys and books that offer a physical and tangible way to play with barbie dolls and characters. You can buy these toys and books from various stores or online platforms such as Amazon.com, Walmart.com, Target.com, etc. Some of the advantages of playing offline are that you can play without needing an internet connection or a device. You can also touch and feel the dolls and items, and use your own imagination and creativity. Here are some of the best offline toys and books for barbie makeup and dress up games: - Barbie Dreamhouse Adventures: This is one of the most popular toys for barbie fans. It is a large and interactive dollhouse that features 8 rooms, 70 accessories, and a working elevator. You can use this toy to recreate scenes from the Barbie Dreamhouse Adventures TV show or create your own stories and adventures. You can also use this toy to play with your barbie dolls and dress them up in different outfits and accessories. - Barbie Style Your Way: This is one of the most popular books for barbie fans. It is a spiral-bound book that features over 100 stickers, 10 paper dolls, and 30 outfits. You can use this book to dress up your paper dolls in different styles and themes, such as sporty, glam, boho, etc. You can also use the stickers to decorate the pages and create your own scenes and stories. <h2>Conclusion</h2>
69
- <p>Barbie makeup and dress up games are a fun and creative way to express yourself and enjoy fashion and beauty. You can play these games online or offline, and choose from different types of games, such as fashion, makeover, princess, celebrity, etc. You can also choose from different styles and themes, such as casual, formal, sporty, glam, natural, bridal, fairy tale, fantasy, modern, pop star, movie star, influencer, etc. You can also apply makeup and accessories, and dress up barbie in fabulous outfits. Playing these games can also improve your memory and concentration, boost your self-esteem and confidence, foster your social skills and communication, and stimulate your imagination and creativity. So what are you waiting for? Grab your favorite barbie character and start playing barbie makeup and dress up games today!</p>
70
- <h2>FAQs</h2>
71
- <p>Here are some of the frequently asked questions about barbie makeup and dress up games:</p>
72
- - Q: How old do you have to be to play barbie makeup and dress up games? - A: There is no age limit to play barbie makeup and dress up games. Anyone who loves barbie and fashion can play these games. However, some games may have age ratings or parental guidance depending on the content and features of the game. - Q: How much do barbie makeup and dress up games cost? - A: The cost of barbie makeup and dress up games varies depending on the platform and the game. Some games are free to play online or download on your device. Some games may require a small fee or a subscription to access more features or content. Some games may also have in-app purchases or ads that may affect your experience or budget. - Q: What are the best devices to play barbie makeup and dress up games? - A: You can play barbie makeup and dress up games on any device that has an internet connection or a compatible operating system. You can use your computer, tablet, smartphone, or any other device that can access online websites or apps. You can also use offline toys or books that do not require any device or connection. - Q: What are the best tips to play barbie makeup and dress up games? - A: Here are some of the best tips to play barbie makeup and dress up games: - Have fun and be creative. There is no right or wrong way to play these games. You can experiment with different options and combinations, and create your own unique look. - Be inspired by your favorite barbie characters, movies, shows, books, etc. You can try to recreate their looks or create your own versions of them. - Be respectful of other players' choices and opinions. You can share your creations with others, but do not judge or criticize them. Everyone has their own style and taste. - Learn from your mistakes and improve your skills. You can try different challenges and levels, and see how well you can do. You can also get feedback and suggestions from others, and use them to improve your game. 
- Q: Where can I find more information about barbie makeup and dress up games? - A: You can find more information about barbie makeup and dress up games on the official barbie website (https://barbie.mattel.com/), the official barbie YouTube channel (https://www.youtube.com/user/barbie), the official barbie Instagram account (https://www.instagram.com/barbie/), or any other reliable sources that offer reviews, guides, news, etc.</p> 401be4b1e0<br />
73
- <br />
74
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/unittest.py DELETED
@@ -1,29 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # File : unittest.py
3
- # Author : Jiayuan Mao
4
- # Email : [email protected]
5
- # Date : 27/01/2018
6
- #
7
- # This file is part of Synchronized-BatchNorm-PyTorch.
8
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9
- # Distributed under MIT License.
10
-
11
- import unittest
12
-
13
- import numpy as np
14
- from torch.autograd import Variable
15
-
16
-
17
- def as_numpy(v):
18
- if isinstance(v, Variable):
19
- v = v.data
20
- return v.cpu().numpy()
21
-
22
-
23
- class TorchTestCase(unittest.TestCase):
24
- def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
25
- npa, npb = as_numpy(a), as_numpy(b)
26
- self.assertTrue(
27
- np.allclose(npa, npb, atol=atol),
28
- 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
29
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/components/chat-message.tsx DELETED
@@ -1,93 +0,0 @@
1
- import remarkGfm from 'remark-gfm'
2
- import remarkMath from 'remark-math'
3
- import supersub from 'remark-supersub'
4
- import remarkBreaks from 'remark-breaks'
5
- import { cn } from '@/lib/utils'
6
- import { CodeBlock } from '@/components/ui/codeblock'
7
- import { MemoizedReactMarkdown } from '@/components/markdown'
8
- import { LearnMore } from './learn-more'
9
- import { ChatMessageModel } from '@/lib/bots/bing/types'
10
- import { useEffect } from 'react'
11
- import { TurnCounter } from './turn-counter'
12
-
13
- export interface ChatMessageProps {
14
- message: ChatMessageModel
15
- }
16
-
17
- export function ChatMessage({ message, ...props }: ChatMessageProps) {
18
- useEffect(() => {
19
- if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) {
20
- window.scrollBy(0, 200)
21
- }
22
- }, [message.text])
23
-
24
- return message.text ? (
25
- <div
26
- className={cn('text-message', message.author)}
27
- {...props}
28
- >
29
- <div className="text-message-content">
30
- <MemoizedReactMarkdown
31
- linkTarget="_blank"
32
- className="prose break-words dark:prose-invert prose-p:leading-relaxed prose-pre:p-0"
33
- remarkPlugins={[remarkGfm, remarkMath, supersub, remarkBreaks]}
34
- components={{
35
- img(obj) {
36
- try {
37
- const uri = new URL(obj.src!)
38
- const w = uri.searchParams.get('w')
39
- const h = uri.searchParams.get('h')
40
- if (w && h) {
41
- uri.searchParams.delete('w')
42
- uri.searchParams.delete('h')
43
- return <a style={{ float: 'left', maxWidth: '50%' }} href={uri.toString()} target="_blank" rel="noopener noreferrer"><img src={obj.src} alt={obj.alt} width={w!} height={h!}/></a>
44
- }
45
- } catch (e) {
46
- }
47
- return <img src={obj.src} alt={obj.alt} title={obj.title} />
48
- },
49
- p({ children }) {
50
- return <p className="mb-2">{children}</p>
51
- },
52
- code({ node, inline, className, children, ...props }) {
53
- if (children.length) {
54
- if (children[0] == '▍') {
55
- return (
56
- <span className="mt-1 animate-pulse cursor-default">▍</span>
57
- )
58
- }
59
-
60
- children[0] = (children[0] as string).replace('`▍`', '▍')
61
- }
62
-
63
- const match = /language-(\w+)/.exec(className || '')
64
-
65
- if (inline) {
66
- return (
67
- <code className={className} {...props}>
68
- {children}
69
- </code>
70
- )
71
- }
72
-
73
- return (
74
- <CodeBlock
75
- key={Math.random()}
76
- language={(match && match[1]) || ''}
77
- value={String(children).replace(/\n$/, '')}
78
- {...props}
79
- />
80
- )
81
- }
82
- }}
83
- >
84
- {message.text}
85
- </MemoizedReactMarkdown>
86
- </div>
87
- <div className="text-message-footer">
88
- {message.author === 'bot' && <LearnMore sourceAttributions={message.sourceAttributions} />}
89
- {message.author === 'bot' && <TurnCounter throttling={message.throttling} />}
90
- </div>
91
- </div>
92
- ) : null
93
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/__init__.py DELETED
@@ -1,25 +0,0 @@
1
- from .factory import (
2
- list_models,
3
- create_model,
4
- create_model_and_transforms,
5
- add_model_config,
6
- )
7
- from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
8
- from .model import (
9
- CLAP,
10
- CLAPTextCfg,
11
- CLAPVisionCfg,
12
- CLAPAudioCfp,
13
- convert_weights_to_fp16,
14
- trace_model,
15
- )
16
- from .openai import load_openai_model, list_openai_models
17
- from .pretrained import (
18
- list_pretrained,
19
- list_pretrained_tag_models,
20
- list_pretrained_model_tags,
21
- get_pretrained_url,
22
- download_pretrained,
23
- )
24
- from .tokenizer import SimpleTokenizer, tokenize
25
- from .transform import image_transform
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/dataset.py DELETED
@@ -1,147 +0,0 @@
1
- import collections
2
- import csv
3
- import logging
4
- import os
5
- import random
6
- from glob import glob
7
- from pathlib import Path
8
-
9
- import numpy as np
10
- import torch
11
- import torchvision
12
-
13
- logger = logging.getLogger(f'main.{__name__}')
14
-
15
-
16
- class VGGSound(torch.utils.data.Dataset):
17
-
18
- def __init__(self, split, specs_dir, transforms=None, splits_path='./data', meta_path='./data/vggsound.csv'):
19
- super().__init__()
20
- self.split = split
21
- self.specs_dir = specs_dir
22
- self.transforms = transforms
23
- self.splits_path = splits_path
24
- self.meta_path = meta_path
25
-
26
- vggsound_meta = list(csv.reader(open(meta_path), quotechar='"'))
27
- unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
28
- self.label2target = {label: target for target, label in enumerate(unique_classes)}
29
- self.target2label = {target: label for label, target in self.label2target.items()}
30
- self.video2target = {row[0]: self.label2target[row[2]] for row in vggsound_meta}
31
-
32
- split_clip_ids_path = os.path.join(splits_path, f'vggsound_{split}.txt')
33
- if not os.path.exists(split_clip_ids_path):
34
- self.make_split_files()
35
- clip_ids_with_timestamp = open(split_clip_ids_path).read().splitlines()
36
- clip_paths = [os.path.join(specs_dir, v + '_mel.npy') for v in clip_ids_with_timestamp]
37
- self.dataset = clip_paths
38
- # self.dataset = clip_paths[:10000] # overfit one batch
39
-
40
- # 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
41
- vid_classes = [self.video2target[Path(path).stem[:11]] for path in self.dataset]
42
- class2count = collections.Counter(vid_classes)
43
- self.class_counts = torch.tensor([class2count[cls] for cls in range(len(class2count))])
44
-
45
- # self.sample_weights = [len(self.dataset) / class2count[self.video2target[Path(path).stem[:11]]] for path in self.dataset]
46
-
47
- def __getitem__(self, idx):
48
- item = {}
49
-
50
- spec_path = self.dataset[idx]
51
- # 'zyTX_1BXKDE_16000_26000' -> 'zyTX_1BXKDE'
52
- video_name = Path(spec_path).stem[:11]
53
-
54
- item['input'] = np.load(spec_path)
55
- item['input_path'] = spec_path
56
-
57
- # if self.split in ['train', 'valid']:
58
- item['target'] = self.video2target[video_name]
59
- item['label'] = self.target2label[item['target']]
60
-
61
- if self.transforms is not None:
62
- item = self.transforms(item)
63
-
64
- return item
65
-
66
- def __len__(self):
67
- return len(self.dataset)
68
-
69
- def make_split_files(self):
70
- random.seed(1337)
71
- logger.info(f'The split files do not exist @ {self.splits_path}. Calculating the new ones.')
72
- # The downloaded videos (some went missing on YouTube and no longer available)
73
- available_vid_paths = sorted(glob(os.path.join(self.specs_dir, '*_mel.npy')))
74
- logger.info(f'The number of clips available after download: {len(available_vid_paths)}')
75
-
76
- # original (full) train and test sets
77
- vggsound_meta = list(csv.reader(open(self.meta_path), quotechar='"'))
78
- train_vids = {row[0] for row in vggsound_meta if row[3] == 'train'}
79
- test_vids = {row[0] for row in vggsound_meta if row[3] == 'test'}
80
- logger.info(f'The number of videos in vggsound train set: {len(train_vids)}')
81
- logger.info(f'The number of videos in vggsound test set: {len(test_vids)}')
82
-
83
- # class counts in test set. We would like to have the same distribution in valid
84
- unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
85
- label2target = {label: target for target, label in enumerate(unique_classes)}
86
- video2target = {row[0]: label2target[row[2]] for row in vggsound_meta}
87
- test_vid_classes = [video2target[vid] for vid in test_vids]
88
- test_target2count = collections.Counter(test_vid_classes)
89
-
90
- # now given the counts from test set, sample the same count for validation and the rest leave in train
91
- train_vids_wo_valid, valid_vids = set(), set()
92
- for target, label in enumerate(label2target.keys()):
93
- class_train_vids = [vid for vid in train_vids if video2target[vid] == target]
94
- random.shuffle(class_train_vids)
95
- count = test_target2count[target]
96
- valid_vids.update(class_train_vids[:count])
97
- train_vids_wo_valid.update(class_train_vids[count:])
98
-
99
- # make file with a list of available test videos (each video should contain timestamps as well)
100
- train_i = valid_i = test_i = 0
101
- with open(os.path.join(self.splits_path, 'vggsound_train.txt'), 'w') as train_file, \
102
- open(os.path.join(self.splits_path, 'vggsound_valid.txt'), 'w') as valid_file, \
103
- open(os.path.join(self.splits_path, 'vggsound_test.txt'), 'w') as test_file:
104
- for path in available_vid_paths:
105
- path = path.replace('_mel.npy', '')
106
- vid_name = Path(path).name
107
- # 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
108
- if vid_name[:11] in train_vids_wo_valid:
109
- train_file.write(vid_name + '\n')
110
- train_i += 1
111
- elif vid_name[:11] in valid_vids:
112
- valid_file.write(vid_name + '\n')
113
- valid_i += 1
114
- elif vid_name[:11] in test_vids:
115
- test_file.write(vid_name + '\n')
116
- test_i += 1
117
- else:
118
- raise Exception(f'Clip {vid_name} is neither in train, valid nor test. Strange.')
119
-
120
- logger.info(f'Put {train_i} clips to the train set and saved it to ./data/vggsound_train.txt')
121
- logger.info(f'Put {valid_i} clips to the valid set and saved it to ./data/vggsound_valid.txt')
122
- logger.info(f'Put {test_i} clips to the test set and saved it to ./data/vggsound_test.txt')
123
-
124
-
125
- if __name__ == '__main__':
126
- from transforms import Crop, StandardNormalizeAudio, ToTensor
127
- specs_path = '/home/nvme/data/vggsound/features/melspec_10s_22050hz/'
128
-
129
- transforms = torchvision.transforms.transforms.Compose([
130
- StandardNormalizeAudio(specs_path),
131
- ToTensor(),
132
- Crop([80, 848]),
133
- ])
134
-
135
- datasets = {
136
- 'train': VGGSound('train', specs_path, transforms),
137
- 'valid': VGGSound('valid', specs_path, transforms),
138
- 'test': VGGSound('test', specs_path, transforms),
139
- }
140
-
141
- print(datasets['train'][0])
142
- print(datasets['valid'][0])
143
- print(datasets['test'][0])
144
-
145
- print(datasets['train'].class_counts)
146
- print(datasets['valid'].class_counts)
147
- print(datasets['test'].class_counts)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AP123/IllusionDiffusion/README.md DELETED
@@ -1,15 +0,0 @@
1
- ---
2
- title: IllusionDiffusion
3
- emoji: 👁
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.44.3
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- hf_oauth: true
12
- disable_embedding: true
13
- ---
14
-
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/base.py DELETED
@@ -1,107 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Base class for all quantizers.
9
- """
10
-
11
- from dataclasses import dataclass, field
12
- import typing as tp
13
-
14
- import torch
15
- from torch import nn
16
-
17
-
18
- @dataclass
19
- class QuantizedResult:
20
- x: torch.Tensor
21
- codes: torch.Tensor
22
- bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item.
23
- penalty: tp.Optional[torch.Tensor] = None
24
- metrics: dict = field(default_factory=dict)
25
-
26
-
27
- class BaseQuantizer(nn.Module):
28
- """Base class for quantizers.
29
- """
30
-
31
- def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
32
- """
33
- Given input tensor x, returns first the quantized (or approximately quantized)
34
- representation along with quantized codes, bandwidth, and any penalty term for the loss.
35
- Finally, this returns a dict of metrics to update logging etc.
36
- Frame rate must be passed so that the bandwidth is properly computed.
37
- """
38
- raise NotImplementedError()
39
-
40
- def encode(self, x: torch.Tensor) -> torch.Tensor:
41
- """Encode a given input tensor with the specified sample rate at the given bandwidth.
42
- """
43
- raise NotImplementedError()
44
-
45
- def decode(self, codes: torch.Tensor) -> torch.Tensor:
46
- """Decode the given codes to the quantized representation.
47
- """
48
- raise NotImplementedError()
49
-
50
- @property
51
- def total_codebooks(self):
52
- """Total number of codebooks.
53
- """
54
- raise NotImplementedError()
55
-
56
- @property
57
- def num_codebooks(self):
58
- """Number of active codebooks.
59
- """
60
- raise NotImplementedError()
61
-
62
- def set_num_codebooks(self, n: int):
63
- """Set the number of active codebooks.
64
- """
65
- raise NotImplementedError()
66
-
67
-
68
- class DummyQuantizer(BaseQuantizer):
69
- """Fake quantizer that actually does not perform any quantization.
70
- """
71
- def __init__(self):
72
- super().__init__()
73
-
74
- def forward(self, x: torch.Tensor, frame_rate: int):
75
- q = x.unsqueeze(1)
76
- return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x))
77
-
78
- def encode(self, x: torch.Tensor) -> torch.Tensor:
79
- """Encode a given input tensor with the specified sample rate at the given bandwidth.
80
- In the case of the DummyQuantizer, the codes are actually identical
81
- to the input and resulting quantized representation as no quantization is done.
82
- """
83
- return x.unsqueeze(1)
84
-
85
- def decode(self, codes: torch.Tensor) -> torch.Tensor:
86
- """Decode the given codes to the quantized representation.
87
- In the case of the DummyQuantizer, the codes are actually identical
88
- to the input and resulting quantized representation as no quantization is done.
89
- """
90
- return codes.squeeze(1)
91
-
92
- @property
93
- def total_codebooks(self):
94
- """Total number of codebooks.
95
- """
96
- return 1
97
-
98
- @property
99
- def num_codebooks(self):
100
- """Total number of codebooks.
101
- """
102
- return self.total_codebooks
103
-
104
- def set_num_codebooks(self, n: int):
105
- """Set the number of active codebooks.
106
- """
107
- raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Abduhoshim/speech_emotion_detection/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Speech Emotion Detection
3
- emoji: 🌖
4
- colorFrom: green
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.24.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Abhaykoul/Wizard-AI/app.py DELETED
@@ -1,56 +0,0 @@
1
- import streamlit as st
2
- from gradio_client import Client
3
-
4
- # Initialize the Gradio client with the API URL
5
- client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/--replicas/fx2sq/")
6
-
7
- # Initialize chat history in session state
8
- if "chat_history" not in st.session_state:
9
- st.session_state.chat_history = []
10
-
11
- # Streamlit UI for continuous interaction
12
- st.title("AI Wizard Chat")
13
-
14
- # Display chat history
15
- for message in st.session_state.chat_history:
16
- with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'user' else '🧙')):
17
- st.write(message["content"])
18
-
19
- # Input area for user message at the bottom
20
- user_input = st.text_input("You:")
21
- if st.button("Submit"):
22
- # Check if the user wants to exit
23
- if user_input.lower() == 'exit':
24
- st.write("Goodbye!")
25
- else:
26
- # Check if the user provided input
27
- if not user_input:
28
- st.warning("Please enter a message.")
29
- else:
30
- # Update system prompt with user input
31
- system_prompt = f"""
32
- Hello! I am AI Wizard. I am here to assist you with my magical knowledge and wisdom.
33
- Feel free to ask me anything, and I'll do my best to provide you with a magical answer.I am powered by HelpingAI which is developed by Abhay Koul a 16 years old developer. I can only tell that much about my developer
34
- input: {user_input}
35
- """
36
-
37
- # Display loading message
38
- with st.spinner("Casting a magic spell..."):
39
- # Make a prediction using the user's input and updated system prompt
40
- result = client.predict(
41
- user_input, # User's input message
42
- system_prompt, # Updated system prompt
43
- 0, # Temperature
44
- 2048, # Max new tokens
45
- 0.5, # Top-p (nucleus sampling)
46
- 1, # Repetition penalty
47
- api_name="/chat"
48
- )
49
-
50
- # Add user and AI messages to chat history
51
- st.session_state.chat_history.append({"role": "user", "content": user_input})
52
- st.session_state.chat_history.append({"role": "AI", "content": result})
53
- # Display chat history
54
- for message in st.session_state.chat_history:
55
- with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'user' else '🧙')):
56
- st.write(message["content"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/1.js DELETED
@@ -1 +0,0 @@
1
- export { default as component } from "../../../../src/routes/+error.svelte";
 
 
spaces/Admin08077/Cosmosis/README.md DELETED
@@ -1,16 +0,0 @@
1
- ---
2
- title: Cosmosis
3
- emoji: 🚀
4
- colorFrom: yellow
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.26.0
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
-
12
- hf_oauth: true
13
- hf_oauth_redirect_path: /custom_callback_route # optional, see "Redirect URLs" below
14
- ---
15
-
16
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/sde_team.py DELETED
@@ -1,48 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import TYPE_CHECKING, List, Tuple
4
-
5
- from . import updater_registry as UpdaterRegistry
6
- from .base import BaseUpdater
7
- from agentverse.message import Message
8
-
9
- if TYPE_CHECKING:
10
- from agentverse.environments import BaseEnvironment
11
- from agentverse.agents import BaseAgent
12
-
13
-
14
- @UpdaterRegistry.register("sde_team")
15
- class SdeTeamUpdater(BaseUpdater):
16
- """
17
- The basic version of updater.
18
- The messages will be seen by all the receiver specified in the message.
19
- """
20
-
21
- def update_memory(self, environment: BaseEnvironment):
22
- added = False
23
- for message in environment.last_messages:
24
- if message.content == "":
25
- continue
26
- added |= self.add_message_to_all_agents(environment.agents, message)
27
-
28
- def add_message_to_all_agents(
29
- self, agents: List[BaseAgent], message: Message
30
- ) -> bool:
31
- if "all" in message.receiver:
32
- # If receiver is all, then add the message to all agents
33
- for agent in agents:
34
- agent.add_message_to_memory([message])
35
- return True
36
- else:
37
- # If receiver is not all, then add the message to the specified agents
38
- receiver_set = message.receiver
39
- for agent in agents:
40
- if agent.name in receiver_set:
41
- agent.add_message_to_memory([message])
42
- receiver_set.remove(agent.name)
43
- if len(receiver_set) > 0:
44
- missing_receiver = ", ".join(list(receiver_set))
45
- raise ValueError(
46
- "Receiver {} not found. Message discarded".format(missing_receiver)
47
- )
48
- return True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetChildrenSizers.js DELETED
@@ -1,15 +0,0 @@
1
- var GetChildrenSizers = function (out) {
2
- if (out === undefined) {
3
- out = [];
4
- }
5
- var children = this.sizerChildren,
6
- child;
7
- for (var key in children) {
8
- child = children[key];
9
- if (child.isRexSizer) {
10
- out.push(child);
11
- }
12
- }
13
- return out;
14
- }
15
- export default GetChildrenSizers;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alcedo/yunmedia/common.js DELETED
@@ -1,173 +0,0 @@
1
- import fetch from 'node-fetch'
2
- import fs from 'fs'
3
- import os from 'os'
4
- import util from 'util'
5
- import stream from 'stream'
6
- import crypto from 'crypto'
7
- import child_process from 'child_process'
8
- import puppeteer from 'puppeteer'
9
-
10
- import { pcm2slk } from 'node-silk'
11
-
12
- //浏览器
13
- let browser
14
- //是否正在截图
15
- let onScreenshot = false
16
- //当前服务器ip
17
- let localIP = ''
18
-
19
- export async function getPttBuffer(file, ffmpeg = 'ffmpeg') {
20
- let buffer
21
- let time
22
- if (file instanceof Buffer || file.startsWith('base64://')) {
23
- // Buffer或base64
24
- const buf = file instanceof Buffer ? file : Buffer.from(file.slice(9), 'base64')
25
- const head = buf.slice(0, 7).toString()
26
- if (head.includes('SILK') || head.includes('AMR')) {
27
- return buf
28
- } else {
29
- const tmpfile = TMP_DIR + '/' + (0, uuid)()
30
- await fs.promises.writeFile(tmpfile, buf)
31
- return audioTrans(tmpfile, ffmpeg)
32
- }
33
- } else if (file.startsWith('http://') || file.startsWith('https://')) {
34
- // 网络文件
35
- // const readable = (await axios.get(file, { responseType: "stream" })).data
36
- try {
37
- const headers = {
38
- 'User-Agent': 'Dalvik/2.1.0 (Linux U Android 12 MI 9 Build/SKQ1.211230.001)'
39
- }
40
- let response = await fetch(file, {
41
- method: 'GET', // post请求
42
- headers
43
- })
44
- const buf = Buffer.from(await response.arrayBuffer())
45
- const tmpfile = TMP_DIR + '/' + (0, uuid)()
46
- await fs.promises.writeFile(tmpfile, buf)
47
- // await (0, pipeline)(readable.pipe(new DownloadTransform), fs.createWriteStream(tmpfile))
48
- const head = await read7Bytes(tmpfile)
49
- if (head.includes('SILK') || head.includes('AMR')) {
50
- fs.unlink(tmpfile, NOOP)
51
- buffer = buf
52
- } else {
53
- buffer = await audioTrans(tmpfile, ffmpeg)
54
- }
55
- } catch (err) {
56
- console.log(err)
57
- }
58
- } else {
59
- // 本地文件
60
- file = String(file).replace(/^file:\/{2}/, '')
61
- IS_WIN && file.startsWith('/') && (file = file.slice(1))
62
- const head = await read7Bytes(file)
63
- if (head.includes('SILK') || head.includes('AMR')) {
64
- buffer = await fs.promises.readFile(file)
65
- } else {
66
- buffer = await audioTrans(file, ffmpeg)
67
- }
68
- }
69
- return { buffer, time }
70
- }
71
-
72
- // 启动浏览器
73
- export async function launchBrowser() {
74
- // 如果浏览器已经存在,就先关闭它
75
- if (browser && !onScreenshot) {
76
- await browser.close()
77
- }
78
- // 启动一个无头浏览器,并且赋值给全局变量
79
- browser = await puppeteer.launch({
80
- executablePath: "/opt/google/chrome/chrome",
81
- headless: "new",
82
- args: ['--no-sandbox', "--disabled-setupid-sandbox"]
83
- })
84
- }
85
-
86
- // 截图指定的网址
87
- export async function screenshot(url, opt) {
88
- // 如果浏览器不存在,就先启动它
89
- if (!browser) {
90
- await launchBrowser()
91
- }
92
- onScreenshot = true
93
- try {
94
- // 创建一个新的页面
95
- const page = await browser.newPage()
96
- // 设置页面的视口大小
97
- await page.setViewport({ width: opt.width || 800, height: opt.height || 600, deviceScaleFactor: opt.dpr || 1 })
98
- // 访问指定的网址,比如http://example.com
99
- await page.goto(url, { timeout: opt.timeout || 12000 , waitUtil: opt.waitUtil || 'networkidle2'})
100
- // 等待页面加载完成
101
- if (opt.selector) await page.waitForSelector(opt.selector)
102
- if (opt.func) await page.waitForFunction(opt.func)
103
- if (opt.wait) await page.waitForTimeout(opt.wait)
104
- // 将页面保存为图片,比如example.png,你可以自己指定图片的格式和质量等选项
105
- let base64 = await page.screenshot({ encoding: 'base64', fullPage: true })
106
- // 关闭页面
107
- await page.close()
108
- onScreenshot = false
109
- return base64
110
- } catch (e) {
111
- onScreenshot = false
112
- return false
113
- }
114
- }
115
-
116
- // 检查网址能否访问
117
- export async function checkWebsite(url) {
118
- try {
119
- const response = await fetch(url)
120
- return response.ok
121
- } catch (error) {
122
- console.log(error)
123
- return false
124
- }
125
- }
126
-
127
- // 获取服务器ip
128
- export async function getPublicIP () {
129
- try {
130
- if (localIP === '') {
131
- const res = await fetch('https://api.ipify.org?format=json')
132
- const data = await res.json()
133
- localIP = data.ip
134
- }
135
- return localIP
136
- } catch (err) {
137
- return '127.0.0.1'
138
- }
139
- }
140
-
141
- async function audioTrans(file, ffmpeg = 'ffmpeg') {
142
- return new Promise((resolve, reject) => {
143
- const tmpfile = TMP_DIR + '/' + (0, uuid)();
144
- (0, child_process.exec)(`${ffmpeg} -i "${file}" -f s16le -ac 1 -ar 24000 "${tmpfile}"`, async (error, stdout, stderr) => {
145
- try {
146
- resolve(pcm2slk(fs.readFileSync(tmpfile)))
147
- } catch {
148
- reject('转码失败')
149
- } finally {
150
- fs.unlink(tmpfile, NOOP)
151
- }
152
- })
153
- })
154
- }
155
-
156
- async function read7Bytes(file) {
157
- const fd = await fs.promises.open(file, 'r')
158
- const buf = (await fd.read(Buffer.alloc(7), 0, 7, 0)).buffer
159
- fd.close()
160
- return buf
161
- }
162
-
163
- function uuid() {
164
- let hex = crypto.randomBytes(16).toString('hex')
165
- return hex.substr(0, 8) + '-' + hex.substr(8, 4) + '-' + hex.substr(12, 4) + '-' + hex.substr(16, 4) + '-' + hex.substr(20)
166
- }
167
-
168
- const IS_WIN = os.platform() === 'win32'
169
- /** 系统临时目录,用于临时存放下载的图片等内容 */
170
- const TMP_DIR = os.tmpdir()
171
- /** no operation */
172
- const NOOP = () => { }
173
- (0, util.promisify)(stream.pipeline)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/批量总结PDF文档.py DELETED
@@ -1,154 +0,0 @@
1
- from predict import predict_no_ui
2
- from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
3
- import re
4
- import unicodedata
5
- fast_debug = False
6
-
7
def is_paragraph_break(match):
    """Decide whether the newline captured by *match* separates paragraphs.

    A newline counts as a paragraph break when the character before it ends a
    sentence (``.``, ``!`` or ``?``), the character after it is uppercase, and
    more than 140 characters of text precede it; otherwise it is an ordinary
    line wrap and collapses to a single space.

    Args:
        match: A regex match with two groups — the non-space characters
            immediately before and after the newline.

    Returns:
        ``"\\n\\n"`` for a paragraph break, ``" "`` otherwise.
    """
    prev_char, next_char = match.groups()

    ends_sentence = prev_char in ".!?"
    starts_sentence = next_char.isupper()
    # Require a minimum amount of preceding text before allowing a break.
    long_enough = len(match.string[:match.start(1)]) > 140

    if ends_sentence and starts_sentence and long_enough:
        return "\n\n"
    return " "
25
-
26
def normalize_text(text):
    """Normalize *text* by decomposing ligatures and dropping non-ASCII.

    Ligatures such as "fi" are first decomposed into their base letters via
    NFKD normalization; any characters still outside the ASCII range
    (accents, symbols, CJK, ...) are then stripped out entirely.

    Args:
        text: The raw input string.

    Returns:
        An ASCII-only, normalized copy of the input.
    """
    decomposed = unicodedata.normalize("NFKD", text)
    # Remove whatever NFKD could not reduce to ASCII (e.g. combining marks).
    return re.sub(r'[^\x00-\x7F]+', '', decomposed)
38
-
39
def clean_text(raw_text):
    """Clean and reformat raw text extracted from a PDF.

    Steps:
      1. Normalize the text (decompose ligatures, strip non-ASCII).
      2. Re-join words hyphenated across lines, e.g. "Espe-\\ncially"
         becomes "Especially".
      3. Replace each remaining newline with either a space or a paragraph
         break, as decided heuristically by ``is_paragraph_break``.

    Args:
        raw_text: Text as extracted from the PDF, with hard line wraps.

    Returns:
        The cleaned text, stripped of leading/trailing whitespace.
    """
    text = normalize_text(raw_text)

    # Merge words that were split across lines by a trailing hyphen.
    text = re.sub(r'(\w+-\n\w+)',
                  lambda m: m.group(1).replace('-\n', ''),
                  text)

    # Rewrite every newline flanked by non-space characters on both sides.
    newline_pattern = re.compile(r'(\S)\n(\S)')
    rejoined = newline_pattern.sub(
        lambda m: m.group(1) + is_paragraph_break(m) + m.group(2),
        text)

    return rejoined.strip()
59
-
60
def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
    """Summarize each PDF in *file_manifest* with GPT, then write a combined abstract.

    Generator: yields ``(chatbot, history, status)`` after every step so the UI
    can refresh between requests. Relies on the module-level ``clean_text``,
    ``predict_no_ui_but_counting_down``, ``write_results_to_file`` and the
    ``fast_debug`` flag (which skips the GPT calls when True).
    """
    import time, glob, os, fitz
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        # Extract the full text with PyMuPDF, then clean line breaks/ligatures.
        with fitz.open(fp) as doc:
            file_content = ""
            for page in doc:
                file_content += page.get_text()
            file_content = clean_text(file_content)
            print(file_content)

        # Only the first file carries the "analyze file by file" instruction.
        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'

        if not fast_debug:
            msg = '正常'
            # ** gpt request ** (with a countdown timeout)
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[])

            print('[2] end gpt req')
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
            print('[4] next')
        if not fast_debug: time.sleep(2)

    # Final pass: ask for a combined Chinese + English abstract over all files.
    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'

    if not fast_debug:
        msg = '正常'
        # ** gpt request ** (with a countdown timeout)
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history)

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
        # Persist the whole conversation to a results file and show it.
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
107
-
108
-
109
@CatchException
def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
    """Plugin entry point: batch-summarize every PDF found (recursively) under *txt*.

    Generator: yields ``(chatbot, history, status)`` tuples for UI refresh.
    All error paths report through ``report_execption`` and return early.
    """
    import glob, os

    # Basic info: plugin purpose and contributors.
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
    yield chatbot, history, '正常'

    # Try to import the PDF dependency; if missing, suggest how to install it.
    try:
        import fitz
    except:
        report_execption(chatbot, history,
            a = f"解析项目: {txt}",
            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield chatbot, history, '正常'
        return

    # Clear the history to avoid overflowing the model's input window.
    history = []

    # Validate the input path; bail out early if it does not exist.
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        return

    # Collect the PDF files to process. (Support for .tex/.cpp/.c manifests
    # was present here but commented out upstream — PDFs only.)
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]

    # Nothing to do: report and exit.
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
        yield chatbot, history, '正常'
        return

    # Run the actual summarization task.
    yield from 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/editor.py DELETED
@@ -1,507 +0,0 @@
1
- # python 3.7
2
- """Utility functions for image editing from latent space."""
3
-
4
- import os.path
5
- import numpy as np
6
-
7
- __all__ = [
8
- 'parse_indices', 'interpolate', 'mix_style',
9
- 'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
10
- ]
11
-
12
-
13
def parse_indices(obj, min_val=None, max_val=None):
    """Parse *obj* into a sorted, de-duplicated list of integer indices.

    Accepted inputs: ``None`` or ``''`` (empty result), a single int, a
    list/tuple/ndarray of ints, or a string of comma-separated numbers and
    dash-separated ranges such as ``"1, 3-5"`` (spaces are ignored).

    Args:
        obj: The object to parse indices from.
        min_val: If not ``None``, assert every index is >= this value.
        max_val: If not ``None``, assert every index is <= this value.

    Returns:
        A sorted list of unique integers.

    Raises:
        ValueError: If *obj* is of an unsupported type.
    """
    if obj is None or obj == '':
        collected = []
    elif isinstance(obj, int):
        collected = [obj]
    elif isinstance(obj, (list, tuple, np.ndarray)):
        collected = list(obj)
    elif isinstance(obj, str):
        collected = []
        for chunk in obj.replace(' ', '').split(','):
            bounds = list(map(int, chunk.split('-')))
            if len(bounds) == 1:
                collected.append(bounds[0])
            elif len(bounds) == 2:
                # Inclusive range "a-b".
                collected.extend(range(bounds[0], bounds[1] + 1))
    else:
        raise ValueError(f'Invalid type of input: {type(obj)}!')

    assert isinstance(collected, list)
    result = sorted(set(collected))
    for idx in result:
        assert isinstance(idx, int)
        if min_val is not None:
            assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
        if max_val is not None:
            assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'

    return result
63
-
64
-
65
def interpolate(src_codes, dst_codes, step=5):
    """Linearly interpolate between two sets of latent codes.

    Args:
        src_codes: Source codes, with shape [num, *code_shape].
        dst_codes: Target codes, with the same shape as *src_codes*.
        step: Number of interpolation samples, endpoints included. For
            example, ``step=5`` inserts three samples between the two.
            (default: 5)

    Returns:
        Interpolated codes with shape [num, step, *code_shape]; the first and
        last slices along axis 1 equal the source and target codes.

    Raises:
        ValueError: If the two inputs have mismatched shapes.
    """
    if src_codes.ndim < 2 or src_codes.shape != dst_codes.shape:
        raise ValueError(f'Shapes of source codes and target codes should both be '
                         f'[num, *code_shape], but {src_codes.shape} and '
                         f'{dst_codes.shape} are received!')
    num = src_codes.shape[0]
    code_shape = src_codes.shape[1:]

    start = src_codes[:, np.newaxis]
    stop = dst_codes[:, np.newaxis]
    # Interpolation weights broadcast along a new [1, step, 1, ...] axis.
    weights = np.linspace(0.0, 1.0, step).reshape(
        [step if axis == 1 else 1 for axis in range(start.ndim)])
    interpolated = start + weights * (stop - start)
    assert interpolated.shape == (num, step, *code_shape)

    return interpolated
95
-
96
-
97
def mix_style(style_codes,
              content_codes,
              num_layers=1,
              mix_layers=None,
              is_style_layerwise=True,
              is_content_layerwise=True):
    """Mixes styles from style codes into content codes.

    Each code consists of `num_layers` per-layer codes, each typically fed to
    one generator layer. Style mixing replaces the content codes at the layers
    listed in `mix_layers` with the corresponding style codes. Single-layer
    inputs are supported: with `is_style_layerwise` / `is_content_layerwise`
    set to False, the code is repeated `num_layers` times before mixing.

    Args:
        style_codes: Shape [num_styles, *code_shape] or
            [num_styles, num_layers, *code_shape].
        content_codes: Shape [num_contents, *code_shape] or
            [num_contents, num_layers, *code_shape].
        num_layers: Total number of layers in the generative model. (default: 1)
        mix_layers: Indices of layers to mix; `None` (or empty) replaces all
            layers, i.e. the content code is fully replaced. (default: None)
        is_style_layerwise: Whether `style_codes` is already layer-wise.
            (default: True)
        is_content_layerwise: Whether `content_codes` is already layer-wise.
            (default: True)

    Returns:
        Mixed codes, shape [num_styles, num_contents, num_layers, *code_shape].

    Raises:
        ValueError: If either input has an invalid shape.
    """
    # Expand single-layer codes to layer-wise form by repeating along axis 1.
    if not is_style_layerwise:
        style_codes = style_codes[:, np.newaxis]
        style_codes = np.tile(
            style_codes,
            [num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
    if not is_content_layerwise:
        content_codes = content_codes[:, np.newaxis]
        content_codes = np.tile(
            content_codes,
            [num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])

    if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
            style_codes.shape[1:] == content_codes.shape[1:]):
        raise ValueError(f'Shapes of style codes and content codes should be '
                         f'[num_styles, num_layers, *code_shape] and '
                         f'[num_contents, num_layers, *code_shape] respectively, '
                         f'but {style_codes.shape} and {content_codes.shape} are '
                         f'received!')

    # An empty selection means "mix every layer".
    layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
    if not layer_indices:
        layer_indices = list(range(num_layers))

    num_styles = style_codes.shape[0]
    num_contents = content_codes.shape[0]
    code_shape = content_codes.shape[2:]

    # Broadcast both to [num_styles, num_contents, num_layers, *code_shape].
    s = style_codes[:, np.newaxis]
    s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
    c = content_codes[np.newaxis]
    c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])

    # Boolean mask: take the style code on mixed layers, content elsewhere.
    from_style = np.zeros(s.shape, dtype=bool)
    from_style[:, :, layer_indices] = True
    results = np.where(from_style, s, c)
    assert results.shape == (num_styles, num_contents, num_layers, *code_shape)

    return results
181
-
182
-
183
def get_layerwise_manipulation_strength(num_layers,
                                        truncation_psi,
                                        truncation_layers):
    """Return per-layer manipulation strengths compensating for truncation.

    The truncation trick rescales layers [0, truncation_layers) as
    ``w = truncation_psi * w + (1 - truncation_psi) * w_avg``, so when one
    boundary manipulates all layers, the truncated layers should use strength
    ``truncation_psi`` while the remaining layers use strength 1.

    Args:
        num_layers: Total number of layers.
        truncation_psi: Truncation factor applied to the early layers.
        truncation_layers: Number of layers the truncation trick covers.

    Returns:
        A list of ``num_layers`` floats.
    """
    strengths = [1.0] * num_layers
    # max(..., 0) keeps the "no truncation" case a no-op, exactly like the
    # original `if truncation_layers > 0` guard.
    for layer_idx in range(max(truncation_layers, 0)):
        strengths[layer_idx] = truncation_psi
    return strengths
203
-
204
-
205
def manipulate(latent_codes,
               boundary,
               start_distance=-5.0,
               end_distance=5.0,
               step=21,
               layerwise_manipulation=False,
               num_layers=1,
               manipulate_layers=None,
               is_code_layerwise=False,
               is_boundary_layerwise=False,
               layerwise_manipulation_strength=1.0):
    """Manipulates latent codes with respect to a particular boundary.

    Produces, for each input code, `step` codes moved along the `boundary`
    direction from `start_distance` to `end_distance` (sign sensitive,
    linearly spaced). Layer-wise manipulation is supported: only the layers in
    `manipulate_layers` are moved, the rest are kept untouched.

    NOTE: `boundary` is assumed to be normalized to unit norm already.

    Args:
        latent_codes: Shape [num, *code_shape] or [num, num_layers, *code_shape].
        boundary: Shape [1, *code_shape] or [1, num_layers, *code_shape].
        start_distance: Start point for manipulation. (default: -5.0)
        end_distance: End point for manipulation. (default: 5.0)
        step: Number of manipulation steps. (default: 21)
        layerwise_manipulation: Whether to manipulate layer-wise. (default: False)
        num_layers: Number of layers; only active with layer-wise manipulation.
        manipulate_layers: Layer indices to manipulate; `None` means all.
        is_code_layerwise: Whether `latent_codes` is already layer-wise; if not,
            it is repeated `num_layers` times first. (default: False)
        is_boundary_layerwise: Whether `boundary` is already layer-wise; if not,
            it is repeated `num_layers` times first. (default: False)
        layerwise_manipulation_strength: Per-layer strength (scalar, sequence,
            or ndarray of length `num_layers`); see
            `get_layerwise_manipulation_strength()`. (default: 1.0)

    Returns:
        Manipulated codes, shape [num, step, *code_shape] when
        `layerwise_manipulation` is False, otherwise
        [num, step, num_layers, *code_shape].

    Raises:
        ValueError: If codes, boundary, or strength have invalid shapes/types.
    """
    if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
        raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
                         f'[1, num_layers, *code_shape], but '
                         f'{boundary.shape} is received!')

    # Non-layer-wise mode collapses to a single "layer".
    if not layerwise_manipulation:
        assert not is_code_layerwise
        assert not is_boundary_layerwise
        num_layers = 1
        manipulate_layers = None
        layerwise_manipulation_strength = 1.0

    # Preprocessing for layer-wise manipulation.
    # Parse indices of manipulation layers (empty selection -> all layers).
    layer_indices = parse_indices(
        manipulate_layers, min_val=0, max_val=num_layers - 1)
    if not layer_indices:
        layer_indices = list(range(num_layers))
    # Make latent codes layer-wise if needed.
    assert num_layers > 0
    if not is_code_layerwise:
        x = latent_codes[:, np.newaxis]
        x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
    else:
        x = latent_codes
        if x.shape[1] != num_layers:
            raise ValueError(f'Latent codes should be with shape [num, num_layers, '
                             f'*code_shape], where `num_layers` equals to '
                             f'{num_layers}, but {x.shape} is received!')
    # Make boundary layer-wise if needed.
    if not is_boundary_layerwise:
        b = boundary
        b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
    else:
        b = boundary[0]
        if b.shape[0] != num_layers:
            raise ValueError(f'Boundary should be with shape [num_layers, '
                             f'*code_shape], where `num_layers` equals to '
                             f'{num_layers}, but {b.shape} is received!')
    # Get layer-wise manipulation strength and validate its length.
    if isinstance(layerwise_manipulation_strength, (int, float)):
        s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
    elif isinstance(layerwise_manipulation_strength, (list, tuple)):
        s = layerwise_manipulation_strength
        if len(s) != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
                             f'mismatches number of layers `{num_layers}`!')
    elif isinstance(layerwise_manipulation_strength, np.ndarray):
        s = layerwise_manipulation_strength
        if s.size != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
                             f'mismatches number of layers `{num_layers}`!')
    else:
        raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')
    # Scale the boundary per layer by the strength.
    s = np.array(s).reshape(
        [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
    b = b * s

    if x.shape[1:] != b.shape:
        raise ValueError(f'Latent code shape {x.shape} and boundary shape '
                         f'{b.shape} mismatch!')
    num = x.shape[0]
    code_shape = x.shape[2:]

    # Broadcast codes and boundary against a [1, step, 1, ...] distance ramp,
    # then only overwrite the selected layers.
    x = x[:, np.newaxis]
    b = b[np.newaxis, np.newaxis, :]
    l = np.linspace(start_distance, end_distance, step).reshape(
        [step if axis == 1 else 1 for axis in range(x.ndim)])
    results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
    is_manipulatable = np.zeros(results.shape, dtype=bool)
    is_manipulatable[:, :, layer_indices] = True
    results = np.where(is_manipulatable, x + l * b, results)
    assert results.shape == (num, step, num_layers, *code_shape)

    return results if layerwise_manipulation else results[:, :, 0]
349
-
350
-
351
def manipulate2(latent_codes,
                proj,
                mindex,
                start_distance=-5.0,
                end_distance=5.0,
                step=21,
                layerwise_manipulation=False,
                num_layers=1,
                manipulate_layers=None,
                is_code_layerwise=False,
                layerwise_manipulation_strength=1.0):
    """Manipulates latent codes along a principal component instead of a boundary.

    Same contract as `manipulate`, except the direction comes from the
    `mindex`-th component of a fitted PCA-like projector `proj` (must expose
    `transform` / `inverse_transform`), applied via `MPC`. The boundary
    arguments of `manipulate` are therefore not needed here.

    Returns:
        Manipulated codes, shape [num, step, *code_shape] when
        `layerwise_manipulation` is False, otherwise
        [num, step, num_layers, *code_shape].

    Raises:
        ValueError: If the latent codes or strength have invalid shapes/types.
    """
    # Non-layer-wise mode collapses to a single "layer".
    if not layerwise_manipulation:
        assert not is_code_layerwise
        num_layers = 1
        manipulate_layers = None
        layerwise_manipulation_strength = 1.0

    # Preprocessing for layer-wise manipulation.
    # Parse indices of manipulation layers (empty selection -> all layers).
    layer_indices = parse_indices(
        manipulate_layers, min_val=0, max_val=num_layers - 1)
    if not layer_indices:
        layer_indices = list(range(num_layers))
    # Make latent codes layer-wise if needed.
    assert num_layers > 0
    if not is_code_layerwise:
        x = latent_codes[:, np.newaxis]
        x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
    else:
        x = latent_codes
        if x.shape[1] != num_layers:
            raise ValueError(f'Latent codes should be with shape [num, num_layers, '
                             f'*code_shape], where `num_layers` equals to '
                             f'{num_layers}, but {x.shape} is received!')
    # Validate layer-wise manipulation strength. NOTE(review): kept for
    # interface parity with `manipulate`; the computed `s` is never applied to
    # the PCA direction below — confirm whether that is intentional upstream.
    if isinstance(layerwise_manipulation_strength, (int, float)):
        s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
    elif isinstance(layerwise_manipulation_strength, (list, tuple)):
        s = layerwise_manipulation_strength
        if len(s) != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
                             f'mismatches number of layers `{num_layers}`!')
    elif isinstance(layerwise_manipulation_strength, np.ndarray):
        s = layerwise_manipulation_strength
        if s.size != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
                             f'mismatches number of layers `{num_layers}`!')
    else:
        raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')

    num = x.shape[0]
    code_shape = x.shape[2:]

    x = x[:, np.newaxis]
    results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
    is_manipulatable = np.zeros(results.shape, dtype=bool)
    is_manipulatable[:, :, layer_indices] = True

    # Move the codes along the chosen principal component, then broadcast the
    # edited codes across all manipulated layers.
    tmp = MPC(proj, x, mindex, start_distance, end_distance, step)
    tmp = tmp[:, :, np.newaxis]
    tmp1 = np.tile(tmp, [num_layers if axis == 2 else 1 for axis in range(tmp.ndim)])

    results = np.where(is_manipulatable, tmp1, results)
    assert results.shape == (num, step, num_layers, *code_shape)
    return results if layerwise_manipulation else results[:, :, 0]
440
-
441
def MPC(proj, x, mindex, start_distance, end_distance, step):
    """Shift codes along the `mindex`-th PCA component of `proj`.

    Args:
        proj: Fitted projector exposing `transform` / `inverse_transform`
            (e.g. sklearn PCA) — presumably fitted on the same latent space;
            TODO confirm against the caller.
        x: Latent codes of shape (batch_size, 1, num_layers, feature); only
            the first layer's code is used.
        mindex: Index of the principal component to move along.
        start_distance: First offset applied in PCA space.
        end_distance: Last offset applied in PCA space.
        step: Number of linearly spaced offsets.

    Returns:
        Array of shape (batch_size, step, feature): the offsets applied in PCA
        space and mapped back to the latent space.
    """
    # x shape (batch_size,1,num_layers,feature)
    # Project the first layer's code into PCA space.
    x1 = proj.transform(x[:, 0, 0, :])  # /np.sqrt(proj.explained_variance_) # (batch_size,num_pc)

    # Repeat each projected code `step` times along a new axis.
    x1 = x1[:, np.newaxis]
    x1 = np.tile(x1, [step if axis == 1 else 1 for axis in range(x1.ndim)])

    # Add the linspace of offsets to the chosen component only.
    l = np.linspace(start_distance, end_distance, step)[None, :]
    x1[:, :, mindex] += l

    # Map every offset code back to the latent space.
    tmp = x1.reshape((-1, x1.shape[-1]))  # *np.sqrt(proj.explained_variance_)
    x2 = proj.inverse_transform(tmp)
    x2 = x2.reshape((x1.shape[0], x1.shape[1], -1))

    return x2
462
-
463
-
464
-
465
-
466
def parse_boundary_list(boundary_list_path):
    """Parses a boundary list file.

    Each non-empty line should have the format
    ``($NAME, $SPACE_TYPE): $PATH``; a line starting with ``DISABLE``
    disables that boundary and is skipped.

    Sample:

    (age, z): $AGE_BOUNDARY_PATH
    (gender, w): $GENDER_BOUNDARY_PATH
    DISABLE(pose, wp): $POSE_BOUNDARY_PATH

    Args:
        boundary_list_path: Path to the boundary list.

    Returns:
        A dictionary mapping the two-element tuple (boundary_name, space_type)
        to the corresponding boundary path.

    Raises:
        ValueError: If the given boundary list does not exist.
    """
    if not os.path.isfile(boundary_list_path):
        # Fix: the f-string previously had no {} placeholder, so the offending
        # path was never interpolated into the message.
        raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')

    boundaries = {}
    with open(boundary_list_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank lines (previously crashed the unpack below).
                continue
            if line.startswith('DISABLE'):
                continue
            # Split on the first ':' only, so paths containing ':' (e.g.
            # Windows drive letters) parse correctly.
            boundary_info, boundary_path = line.split(':', 1)
            boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
            boundary_name = boundary_name.strip()
            space_type = space_type.strip().lower()
            boundary_path = boundary_path.strip()
            boundaries[(boundary_name, space_type)] = boundary_path
    return boundaries
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ipndm.md DELETED
@@ -1,20 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
 - # Improved Pseudo Numerical Methods for Diffusion Models (iPNDM)
14
-
15
- ## Overview
16
-
17
- Original implementation can be found [here](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296).
18
-
19
- ## IPNDMScheduler
20
- [[autodoc]] IPNDMScheduler
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines.py DELETED
@@ -1,1745 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import glob
18
- import json
19
- import os
20
- import random
21
- import shutil
22
- import sys
23
- import tempfile
24
- import traceback
25
- import unittest
26
- import unittest.mock as mock
27
-
28
- import numpy as np
29
- import PIL
30
- import requests_mock
31
- import safetensors.torch
32
- import torch
33
- from parameterized import parameterized
34
- from PIL import Image
35
- from requests.exceptions import HTTPError
36
- from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
37
-
38
- from diffusers import (
39
- AutoencoderKL,
40
- ConfigMixin,
41
- DDIMPipeline,
42
- DDIMScheduler,
43
- DDPMPipeline,
44
- DDPMScheduler,
45
- DiffusionPipeline,
46
- DPMSolverMultistepScheduler,
47
- EulerAncestralDiscreteScheduler,
48
- EulerDiscreteScheduler,
49
- LMSDiscreteScheduler,
50
- ModelMixin,
51
- PNDMScheduler,
52
- StableDiffusionImg2ImgPipeline,
53
- StableDiffusionInpaintPipelineLegacy,
54
- StableDiffusionPipeline,
55
- UNet2DConditionModel,
56
- UNet2DModel,
57
- UniPCMultistepScheduler,
58
- logging,
59
- )
60
- from diffusers.pipelines.pipeline_utils import variant_compatible_siblings
61
- from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
62
- from diffusers.utils import (
63
- CONFIG_NAME,
64
- WEIGHTS_NAME,
65
- floats_tensor,
66
- is_compiled_module,
67
- nightly,
68
- require_torch_2,
69
- slow,
70
- torch_device,
71
- )
72
- from diffusers.utils.testing_utils import (
73
- CaptureLogger,
74
- enable_full_determinism,
75
- get_tests_dir,
76
- load_numpy,
77
- require_compel,
78
- require_flax,
79
- require_torch_gpu,
80
- run_test_in_subprocess,
81
- )
82
-
83
-
84
- enable_full_determinism()
85
-
86
-
87
- # Will be run via run_test_in_subprocess
88
- def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
89
- error = None
90
- try:
91
- # 1. Load models
92
- model = UNet2DModel(
93
- block_out_channels=(32, 64),
94
- layers_per_block=2,
95
- sample_size=32,
96
- in_channels=3,
97
- out_channels=3,
98
- down_block_types=("DownBlock2D", "AttnDownBlock2D"),
99
- up_block_types=("AttnUpBlock2D", "UpBlock2D"),
100
- )
101
- model = torch.compile(model)
102
- scheduler = DDPMScheduler(num_train_timesteps=10)
103
-
104
- ddpm = DDPMPipeline(model, scheduler)
105
-
106
- # previous diffusers versions stripped compilation off
107
- # compiled modules
108
- assert is_compiled_module(ddpm.unet)
109
-
110
- ddpm.to(torch_device)
111
- ddpm.set_progress_bar_config(disable=None)
112
-
113
- with tempfile.TemporaryDirectory() as tmpdirname:
114
- ddpm.save_pretrained(tmpdirname)
115
- new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
116
- new_ddpm.to(torch_device)
117
-
118
- generator = torch.Generator(device=torch_device).manual_seed(0)
119
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
120
-
121
- generator = torch.Generator(device=torch_device).manual_seed(0)
122
- new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
123
-
124
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
125
- except Exception:
126
- error = f"{traceback.format_exc()}"
127
-
128
- results = {"error": error}
129
- out_queue.put(results, timeout=timeout)
130
- out_queue.join()
131
-
132
-
133
- class CustomEncoder(ModelMixin, ConfigMixin):
134
- def __init__(self):
135
- super().__init__()
136
-
137
-
138
- class CustomPipeline(DiffusionPipeline):
139
- def __init__(self, encoder: CustomEncoder, scheduler: DDIMScheduler):
140
- super().__init__()
141
- self.register_modules(encoder=encoder, scheduler=scheduler)
142
-
143
-
144
- class DownloadTests(unittest.TestCase):
145
- def test_one_request_upon_cached(self):
146
- # TODO: For some reason this test fails on MPS where no HEAD call is made.
147
- if torch_device == "mps":
148
- return
149
-
150
- with tempfile.TemporaryDirectory() as tmpdirname:
151
- with requests_mock.mock(real_http=True) as m:
152
- DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe", cache_dir=tmpdirname)
153
-
154
- download_requests = [r.method for r in m.request_history]
155
- assert download_requests.count("HEAD") == 15, "15 calls to files"
156
- assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json"
157
- assert (
158
- len(download_requests) == 32
159
- ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"
160
-
161
- with requests_mock.mock(real_http=True) as m:
162
- DiffusionPipeline.download(
163
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
164
- )
165
-
166
- cache_requests = [r.method for r in m.request_history]
167
- assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
168
- assert cache_requests.count("GET") == 1, "model info is only GET"
169
- assert (
170
- len(cache_requests) == 2
171
- ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"
172
-
173
- def test_less_downloads_passed_object(self):
174
- with tempfile.TemporaryDirectory() as tmpdirname:
175
- cached_folder = DiffusionPipeline.download(
176
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
177
- )
178
-
179
- # make sure safety checker is not downloaded
180
- assert "safety_checker" not in os.listdir(cached_folder)
181
-
182
- # make sure rest is downloaded
183
- assert "unet" in os.listdir(cached_folder)
184
- assert "tokenizer" in os.listdir(cached_folder)
185
- assert "vae" in os.listdir(cached_folder)
186
- assert "model_index.json" in os.listdir(cached_folder)
187
- assert "scheduler" in os.listdir(cached_folder)
188
- assert "feature_extractor" in os.listdir(cached_folder)
189
-
190
- def test_less_downloads_passed_object_calls(self):
191
- # TODO: For some reason this test fails on MPS where no HEAD call is made.
192
- if torch_device == "mps":
193
- return
194
-
195
- with tempfile.TemporaryDirectory() as tmpdirname:
196
- with requests_mock.mock(real_http=True) as m:
197
- DiffusionPipeline.download(
198
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
199
- )
200
-
201
- download_requests = [r.method for r in m.request_history]
202
- # 15 - 2 because no call to config or model file for `safety_checker`
203
- assert download_requests.count("HEAD") == 13, "13 calls to files"
204
- # 17 - 2 because no call to config or model file for `safety_checker`
205
- assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json"
206
- assert (
207
- len(download_requests) == 28
208
- ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"
209
-
210
- with requests_mock.mock(real_http=True) as m:
211
- DiffusionPipeline.download(
212
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
213
- )
214
-
215
- cache_requests = [r.method for r in m.request_history]
216
- assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
217
- assert cache_requests.count("GET") == 1, "model info is only GET"
218
- assert (
219
- len(cache_requests) == 2
220
- ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"
221
-
222
- def test_download_only_pytorch(self):
223
- with tempfile.TemporaryDirectory() as tmpdirname:
224
- # pipeline has Flax weights
225
- tmpdirname = DiffusionPipeline.download(
226
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
227
- )
228
-
229
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
230
- files = [item for sublist in all_root_files for item in sublist]
231
-
232
- # None of the downloaded files should be a flax file even if we have some here:
233
- # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
234
- assert not any(f.endswith(".msgpack") for f in files)
235
- # We need to never convert this tiny model to safetensors for this test to pass
236
- assert not any(f.endswith(".safetensors") for f in files)
237
-
238
- def test_force_safetensors_error(self):
239
- with tempfile.TemporaryDirectory() as tmpdirname:
240
- # pipeline has Flax weights
241
- with self.assertRaises(EnvironmentError):
242
- tmpdirname = DiffusionPipeline.download(
243
- "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors",
244
- safety_checker=None,
245
- cache_dir=tmpdirname,
246
- use_safetensors=True,
247
- )
248
-
249
- def test_download_safetensors(self):
250
- with tempfile.TemporaryDirectory() as tmpdirname:
251
- # pipeline has Flax weights
252
- tmpdirname = DiffusionPipeline.download(
253
- "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors",
254
- safety_checker=None,
255
- cache_dir=tmpdirname,
256
- )
257
-
258
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
259
- files = [item for sublist in all_root_files for item in sublist]
260
-
261
- # None of the downloaded files should be a pytorch file even if we have some here:
262
- # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
263
- assert not any(f.endswith(".bin") for f in files)
264
-
265
- def test_download_safetensors_index(self):
266
- for variant in ["fp16", None]:
267
- with tempfile.TemporaryDirectory() as tmpdirname:
268
- tmpdirname = DiffusionPipeline.download(
269
- "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
270
- cache_dir=tmpdirname,
271
- use_safetensors=True,
272
- variant=variant,
273
- )
274
-
275
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
276
- files = [item for sublist in all_root_files for item in sublist]
277
-
278
- # None of the downloaded files should be a safetensors file even if we have some here:
279
- # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder
280
- if variant is None:
281
- assert not any("fp16" in f for f in files)
282
- else:
283
- model_files = [f for f in files if "safetensors" in f]
284
- assert all("fp16" in f for f in model_files)
285
-
286
- assert len([f for f in files if ".safetensors" in f]) == 8
287
- assert not any(".bin" in f for f in files)
288
-
289
- def test_download_bin_index(self):
290
- for variant in ["fp16", None]:
291
- with tempfile.TemporaryDirectory() as tmpdirname:
292
- tmpdirname = DiffusionPipeline.download(
293
- "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
294
- cache_dir=tmpdirname,
295
- use_safetensors=False,
296
- variant=variant,
297
- )
298
-
299
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
300
- files = [item for sublist in all_root_files for item in sublist]
301
-
302
- # None of the downloaded files should be a safetensors file even if we have some here:
303
- # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder
304
- if variant is None:
305
- assert not any("fp16" in f for f in files)
306
- else:
307
- model_files = [f for f in files if "bin" in f]
308
- assert all("fp16" in f for f in model_files)
309
-
310
- assert len([f for f in files if ".bin" in f]) == 8
311
- assert not any(".safetensors" in f for f in files)
312
-
313
- def test_download_no_openvino_by_default(self):
314
- with tempfile.TemporaryDirectory() as tmpdirname:
315
- tmpdirname = DiffusionPipeline.download(
316
- "hf-internal-testing/tiny-stable-diffusion-open-vino",
317
- cache_dir=tmpdirname,
318
- )
319
-
320
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
321
- files = [item for sublist in all_root_files for item in sublist]
322
-
323
- # make sure that by default no openvino weights are downloaded
324
- assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
325
- assert not any("openvino_" in f for f in files)
326
-
327
- def test_download_no_onnx_by_default(self):
328
- with tempfile.TemporaryDirectory() as tmpdirname:
329
- tmpdirname = DiffusionPipeline.download(
330
- "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
331
- cache_dir=tmpdirname,
332
- )
333
-
334
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
335
- files = [item for sublist in all_root_files for item in sublist]
336
-
337
- # make sure that by default no onnx weights are downloaded
338
- assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
339
- assert not any((f.endswith(".onnx") or f.endswith(".pb")) for f in files)
340
-
341
- with tempfile.TemporaryDirectory() as tmpdirname:
342
- tmpdirname = DiffusionPipeline.download(
343
- "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
344
- cache_dir=tmpdirname,
345
- use_onnx=True,
346
- )
347
-
348
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
349
- files = [item for sublist in all_root_files for item in sublist]
350
-
351
- # if `use_onnx` is specified make sure weights are downloaded
352
- assert any((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
353
- assert any((f.endswith(".onnx")) for f in files)
354
- assert any((f.endswith(".pb")) for f in files)
355
-
356
- def test_download_no_safety_checker(self):
357
- prompt = "hello"
358
- pipe = StableDiffusionPipeline.from_pretrained(
359
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
360
- )
361
- pipe = pipe.to(torch_device)
362
- generator = torch.manual_seed(0)
363
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
364
-
365
- pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
366
- pipe_2 = pipe_2.to(torch_device)
367
- generator = torch.manual_seed(0)
368
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
369
-
370
- assert np.max(np.abs(out - out_2)) < 1e-3
371
-
372
- def test_load_no_safety_checker_explicit_locally(self):
373
- prompt = "hello"
374
- pipe = StableDiffusionPipeline.from_pretrained(
375
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
376
- )
377
- pipe = pipe.to(torch_device)
378
- generator = torch.manual_seed(0)
379
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
380
-
381
- with tempfile.TemporaryDirectory() as tmpdirname:
382
- pipe.save_pretrained(tmpdirname)
383
- pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None)
384
- pipe_2 = pipe_2.to(torch_device)
385
-
386
- generator = torch.manual_seed(0)
387
-
388
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
389
-
390
- assert np.max(np.abs(out - out_2)) < 1e-3
391
-
392
- def test_load_no_safety_checker_default_locally(self):
393
- prompt = "hello"
394
- pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
395
- pipe = pipe.to(torch_device)
396
-
397
- generator = torch.manual_seed(0)
398
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
399
-
400
- with tempfile.TemporaryDirectory() as tmpdirname:
401
- pipe.save_pretrained(tmpdirname)
402
- pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname)
403
- pipe_2 = pipe_2.to(torch_device)
404
-
405
- generator = torch.manual_seed(0)
406
-
407
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
408
-
409
- assert np.max(np.abs(out - out_2)) < 1e-3
410
-
411
- def test_cached_files_are_used_when_no_internet(self):
412
- # A mock response for an HTTP head request to emulate server down
413
- response_mock = mock.Mock()
414
- response_mock.status_code = 500
415
- response_mock.headers = {}
416
- response_mock.raise_for_status.side_effect = HTTPError
417
- response_mock.json.return_value = {}
418
-
419
- # Download this model to make sure it's in the cache.
420
- orig_pipe = DiffusionPipeline.from_pretrained(
421
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
422
- )
423
- orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}
424
-
425
- # Under the mock environment we get a 500 error when trying to reach the model.
426
- with mock.patch("requests.request", return_value=response_mock):
427
- # Download this model to make sure it's in the cache.
428
- pipe = DiffusionPipeline.from_pretrained(
429
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
430
- )
431
- comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}
432
-
433
- for m1, m2 in zip(orig_comps.values(), comps.values()):
434
- for p1, p2 in zip(m1.parameters(), m2.parameters()):
435
- if p1.data.ne(p2.data).sum() > 0:
436
- assert False, "Parameters not the same!"
437
-
438
- def test_local_files_only_are_used_when_no_internet(self):
439
- # A mock response for an HTTP head request to emulate server down
440
- response_mock = mock.Mock()
441
- response_mock.status_code = 500
442
- response_mock.headers = {}
443
- response_mock.raise_for_status.side_effect = HTTPError
444
- response_mock.json.return_value = {}
445
-
446
- # first check that with local files only the pipeline can only be used if cached
447
- with self.assertRaises(FileNotFoundError):
448
- with tempfile.TemporaryDirectory() as tmpdirname:
449
- orig_pipe = DiffusionPipeline.from_pretrained(
450
- "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname
451
- )
452
-
453
- # now download
454
- orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch")
455
-
456
- # make sure it can be loaded with local_files_only
457
- orig_pipe = DiffusionPipeline.from_pretrained(
458
- "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True
459
- )
460
- orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}
461
-
462
- # Under the mock environment we get a 500 error when trying to connect to the internet.
463
- # Make sure it works local_files_only only works here!
464
- with mock.patch("requests.request", return_value=response_mock):
465
- # Download this model to make sure it's in the cache.
466
- pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
467
- comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}
468
-
469
- for m1, m2 in zip(orig_comps.values(), comps.values()):
470
- for p1, p2 in zip(m1.parameters(), m2.parameters()):
471
- if p1.data.ne(p2.data).sum() > 0:
472
- assert False, "Parameters not the same!"
473
-
474
- def test_download_from_variant_folder(self):
475
- for safe_avail in [False, True]:
476
- import diffusers
477
-
478
- diffusers.utils.import_utils._safetensors_available = safe_avail
479
-
480
- other_format = ".bin" if safe_avail else ".safetensors"
481
- with tempfile.TemporaryDirectory() as tmpdirname:
482
- tmpdirname = StableDiffusionPipeline.download(
483
- "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname
484
- )
485
- all_root_files = [t[-1] for t in os.walk(tmpdirname)]
486
- files = [item for sublist in all_root_files for item in sublist]
487
-
488
- # None of the downloaded files should be a variant file even if we have some here:
489
- # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
490
- assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
491
- assert not any(f.endswith(other_format) for f in files)
492
- # no variants
493
- assert not any(len(f.split(".")) == 3 for f in files)
494
-
495
- diffusers.utils.import_utils._safetensors_available = True
496
-
497
- def test_download_variant_all(self):
498
- for safe_avail in [False, True]:
499
- import diffusers
500
-
501
- diffusers.utils.import_utils._safetensors_available = safe_avail
502
-
503
- other_format = ".bin" if safe_avail else ".safetensors"
504
- this_format = ".safetensors" if safe_avail else ".bin"
505
- variant = "fp16"
506
-
507
- with tempfile.TemporaryDirectory() as tmpdirname:
508
- tmpdirname = StableDiffusionPipeline.download(
509
- "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant
510
- )
511
- all_root_files = [t[-1] for t in os.walk(tmpdirname)]
512
- files = [item for sublist in all_root_files for item in sublist]
513
-
514
- # None of the downloaded files should be a non-variant file even if we have some here:
515
- # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
516
- assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
517
- # unet, vae, text_encoder, safety_checker
518
- assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4
519
- # all checkpoints should have variant ending
520
- assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files)
521
- assert not any(f.endswith(other_format) for f in files)
522
-
523
- diffusers.utils.import_utils._safetensors_available = True
524
-
525
- def test_download_variant_partly(self):
526
- for safe_avail in [False, True]:
527
- import diffusers
528
-
529
- diffusers.utils.import_utils._safetensors_available = safe_avail
530
-
531
- other_format = ".bin" if safe_avail else ".safetensors"
532
- this_format = ".safetensors" if safe_avail else ".bin"
533
- variant = "no_ema"
534
-
535
- with tempfile.TemporaryDirectory() as tmpdirname:
536
- tmpdirname = StableDiffusionPipeline.download(
537
- "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant
538
- )
539
- all_root_files = [t[-1] for t in os.walk(tmpdirname)]
540
- files = [item for sublist in all_root_files for item in sublist]
541
-
542
- unet_files = os.listdir(os.path.join(tmpdirname, "unet"))
543
-
544
- # Some of the downloaded files should be a non-variant file, check:
545
- # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
546
- assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
547
- # only unet has "no_ema" variant
548
- assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files
549
- assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1
550
- # vae, safety_checker and text_encoder should have no variant
551
- assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3
552
- assert not any(f.endswith(other_format) for f in files)
553
-
554
- diffusers.utils.import_utils._safetensors_available = True
555
-
556
- def test_download_broken_variant(self):
557
- for safe_avail in [False, True]:
558
- import diffusers
559
-
560
- diffusers.utils.import_utils._safetensors_available = safe_avail
561
- # text encoder is missing no variant and "no_ema" variant weights, so the following can't work
562
- for variant in [None, "no_ema"]:
563
- with self.assertRaises(OSError) as error_context:
564
- with tempfile.TemporaryDirectory() as tmpdirname:
565
- tmpdirname = StableDiffusionPipeline.from_pretrained(
566
- "hf-internal-testing/stable-diffusion-broken-variants",
567
- cache_dir=tmpdirname,
568
- variant=variant,
569
- )
570
-
571
- assert "Error no file name" in str(error_context.exception)
572
-
573
- # text encoder has fp16 variants so we can load it
574
- with tempfile.TemporaryDirectory() as tmpdirname:
575
- tmpdirname = StableDiffusionPipeline.download(
576
- "hf-internal-testing/stable-diffusion-broken-variants", cache_dir=tmpdirname, variant="fp16"
577
- )
578
-
579
- all_root_files = [t[-1] for t in os.walk(tmpdirname)]
580
- files = [item for sublist in all_root_files for item in sublist]
581
-
582
- # None of the downloaded files should be a non-variant file even if we have some here:
583
- # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet
584
- assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
585
- # only unet has "no_ema" variant
586
-
587
- diffusers.utils.import_utils._safetensors_available = True
588
-
589
- def test_local_save_load_index(self):
590
- prompt = "hello"
591
- for variant in [None, "fp16"]:
592
- for use_safe in [True, False]:
593
- pipe = StableDiffusionPipeline.from_pretrained(
594
- "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
595
- variant=variant,
596
- use_safetensors=use_safe,
597
- safety_checker=None,
598
- )
599
- pipe = pipe.to(torch_device)
600
- generator = torch.manual_seed(0)
601
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
602
-
603
- with tempfile.TemporaryDirectory() as tmpdirname:
604
- pipe.save_pretrained(tmpdirname)
605
- pipe_2 = StableDiffusionPipeline.from_pretrained(
606
- tmpdirname, safe_serialization=use_safe, variant=variant
607
- )
608
- pipe_2 = pipe_2.to(torch_device)
609
-
610
- generator = torch.manual_seed(0)
611
-
612
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
613
-
614
- assert np.max(np.abs(out - out_2)) < 1e-3
615
-
616
- def test_text_inversion_download(self):
617
- pipe = StableDiffusionPipeline.from_pretrained(
618
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
619
- )
620
- pipe = pipe.to(torch_device)
621
-
622
- num_tokens = len(pipe.tokenizer)
623
-
624
- # single token load local
625
- with tempfile.TemporaryDirectory() as tmpdirname:
626
- ten = {"<*>": torch.ones((32,))}
627
- torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))
628
-
629
- pipe.load_textual_inversion(tmpdirname)
630
-
631
- token = pipe.tokenizer.convert_tokens_to_ids("<*>")
632
- assert token == num_tokens, "Added token must be at spot `num_tokens`"
633
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32
634
- assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>"
635
-
636
- prompt = "hey <*>"
637
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
638
- assert out.shape == (1, 128, 128, 3)
639
-
640
- # single token load local with weight name
641
- with tempfile.TemporaryDirectory() as tmpdirname:
642
- ten = {"<**>": 2 * torch.ones((1, 32))}
643
- torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))
644
-
645
- pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin")
646
-
647
- token = pipe.tokenizer.convert_tokens_to_ids("<**>")
648
- assert token == num_tokens + 1, "Added token must be at spot `num_tokens`"
649
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
650
- assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>"
651
-
652
- prompt = "hey <**>"
653
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
654
- assert out.shape == (1, 128, 128, 3)
655
-
656
- # multi token load
657
- with tempfile.TemporaryDirectory() as tmpdirname:
658
- ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])}
659
- torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))
660
-
661
- pipe.load_textual_inversion(tmpdirname)
662
-
663
- token = pipe.tokenizer.convert_tokens_to_ids("<***>")
664
- token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1")
665
- token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2")
666
-
667
- assert token == num_tokens + 2, "Added token must be at spot `num_tokens`"
668
- assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens`"
669
- assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens`"
670
- assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
671
- assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
672
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
673
- assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2"
674
-
675
- prompt = "hey <***>"
676
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
677
- assert out.shape == (1, 128, 128, 3)
678
-
679
- # multi token load a1111
680
- with tempfile.TemporaryDirectory() as tmpdirname:
681
- ten = {
682
- "string_to_param": {
683
- "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])
684
- },
685
- "name": "<****>",
686
- }
687
- torch.save(ten, os.path.join(tmpdirname, "a1111.bin"))
688
-
689
- pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin")
690
-
691
- token = pipe.tokenizer.convert_tokens_to_ids("<****>")
692
- token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1")
693
- token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2")
694
-
695
- assert token == num_tokens + 5, "Added token must be at spot `num_tokens`"
696
- assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens`"
697
- assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens`"
698
- assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
699
- assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
700
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
701
- assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2"
702
-
703
- prompt = "hey <****>"
704
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
705
- assert out.shape == (1, 128, 128, 3)
706
-
707
- # multi embedding load
708
- with tempfile.TemporaryDirectory() as tmpdirname1:
709
- with tempfile.TemporaryDirectory() as tmpdirname2:
710
- ten = {"<*****>": torch.ones((32,))}
711
- torch.save(ten, os.path.join(tmpdirname1, "learned_embeds.bin"))
712
-
713
- ten = {"<******>": 2 * torch.ones((1, 32))}
714
- torch.save(ten, os.path.join(tmpdirname2, "learned_embeds.bin"))
715
-
716
- pipe.load_textual_inversion([tmpdirname1, tmpdirname2])
717
-
718
- token = pipe.tokenizer.convert_tokens_to_ids("<*****>")
719
- assert token == num_tokens + 8, "Added token must be at spot `num_tokens`"
720
- assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32
721
- assert pipe._maybe_convert_prompt("<*****>", pipe.tokenizer) == "<*****>"
722
-
723
- token = pipe.tokenizer.convert_tokens_to_ids("<******>")
724
- assert token == num_tokens + 9, "Added token must be at spot `num_tokens`"
725
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
726
- assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>"
727
-
728
- prompt = "hey <*****> <******>"
729
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
730
- assert out.shape == (1, 128, 128, 3)
731
-
732
- # single token state dict load
733
- ten = {"<x>": torch.ones((32,))}
734
- pipe.load_textual_inversion(ten)
735
-
736
- token = pipe.tokenizer.convert_tokens_to_ids("<x>")
737
- assert token == num_tokens + 10, "Added token must be at spot `num_tokens`"
738
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32
739
- assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>"
740
-
741
- prompt = "hey <x>"
742
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
743
- assert out.shape == (1, 128, 128, 3)
744
-
745
- # multi embedding state dict load
746
- ten1 = {"<xxxxx>": torch.ones((32,))}
747
- ten2 = {"<xxxxxx>": 2 * torch.ones((1, 32))}
748
-
749
- pipe.load_textual_inversion([ten1, ten2])
750
-
751
- token = pipe.tokenizer.convert_tokens_to_ids("<xxxxx>")
752
- assert token == num_tokens + 11, "Added token must be at spot `num_tokens`"
753
- assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32
754
- assert pipe._maybe_convert_prompt("<xxxxx>", pipe.tokenizer) == "<xxxxx>"
755
-
756
- token = pipe.tokenizer.convert_tokens_to_ids("<xxxxxx>")
757
- assert token == num_tokens + 12, "Added token must be at spot `num_tokens`"
758
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
759
- assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>"
760
-
761
- prompt = "hey <xxxxx> <xxxxxx>"
762
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
763
- assert out.shape == (1, 128, 128, 3)
764
-
765
- # auto1111 multi-token state dict load
766
- ten = {
767
- "string_to_param": {
768
- "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])
769
- },
770
- "name": "<xxxx>",
771
- }
772
-
773
- pipe.load_textual_inversion(ten)
774
-
775
- token = pipe.tokenizer.convert_tokens_to_ids("<xxxx>")
776
- token_1 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_1")
777
- token_2 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_2")
778
-
779
- assert token == num_tokens + 13, "Added token must be at spot `num_tokens`"
780
- assert token_1 == num_tokens + 14, "Added token must be at spot `num_tokens`"
781
- assert token_2 == num_tokens + 15, "Added token must be at spot `num_tokens`"
782
- assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
783
- assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
784
- assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
785
- assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2"
786
-
787
- prompt = "hey <xxxx>"
788
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
789
- assert out.shape == (1, 128, 128, 3)
790
-
791
- # multiple references to multi embedding
792
- ten = {"<cat>": torch.ones(3, 32)}
793
- pipe.load_textual_inversion(ten)
794
-
795
- assert (
796
- pipe._maybe_convert_prompt("<cat> <cat>", pipe.tokenizer) == "<cat> <cat>_1 <cat>_2 <cat> <cat>_1 <cat>_2"
797
- )
798
-
799
- prompt = "hey <cat> <cat>"
800
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
801
- assert out.shape == (1, 128, 128, 3)
802
-
803
- def test_download_ignore_files(self):
804
- # Check https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files/blob/72f58636e5508a218c6b3f60550dc96445547817/model_index.json#L4
805
- with tempfile.TemporaryDirectory() as tmpdirname:
806
- # pipeline has Flax weights
807
- tmpdirname = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files")
808
- all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
809
- files = [item for sublist in all_root_files for item in sublist]
810
-
811
- # None of the downloaded files should be a pytorch file even if we have some here:
812
- # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
813
- assert not any(f in ["vae/diffusion_pytorch_model.bin", "text_encoder/config.json"] for f in files)
814
- assert len(files) == 14
815
-
816
-
817
- class CustomPipelineTests(unittest.TestCase):
818
- def test_load_custom_pipeline(self):
819
- pipeline = DiffusionPipeline.from_pretrained(
820
- "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
821
- )
822
- pipeline = pipeline.to(torch_device)
823
- # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
824
- # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
825
- assert pipeline.__class__.__name__ == "CustomPipeline"
826
-
827
- def test_load_custom_github(self):
828
- pipeline = DiffusionPipeline.from_pretrained(
829
- "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main"
830
- )
831
-
832
- # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690
833
- with torch.no_grad():
834
- output = pipeline()
835
-
836
- assert output.numel() == output.sum()
837
-
838
- # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python
839
- # Could in the future work with hashes instead.
840
- del sys.modules["diffusers_modules.git.one_step_unet"]
841
-
842
- pipeline = DiffusionPipeline.from_pretrained(
843
- "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2"
844
- )
845
- with torch.no_grad():
846
- output = pipeline()
847
-
848
- assert output.numel() != output.sum()
849
-
850
- assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline"
851
-
852
- def test_run_custom_pipeline(self):
853
- pipeline = DiffusionPipeline.from_pretrained(
854
- "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
855
- )
856
- pipeline = pipeline.to(torch_device)
857
- images, output_str = pipeline(num_inference_steps=2, output_type="np")
858
-
859
- assert images[0].shape == (1, 32, 32, 3)
860
-
861
- # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102
862
- assert output_str == "This is a test"
863
-
864
- def test_local_custom_pipeline_repo(self):
865
- local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
866
- pipeline = DiffusionPipeline.from_pretrained(
867
- "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
868
- )
869
- pipeline = pipeline.to(torch_device)
870
- images, output_str = pipeline(num_inference_steps=2, output_type="np")
871
-
872
- assert pipeline.__class__.__name__ == "CustomLocalPipeline"
873
- assert images[0].shape == (1, 32, 32, 3)
874
- # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
875
- assert output_str == "This is a local test"
876
-
877
- def test_local_custom_pipeline_file(self):
878
- local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
879
- local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py")
880
- pipeline = DiffusionPipeline.from_pretrained(
881
- "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
882
- )
883
- pipeline = pipeline.to(torch_device)
884
- images, output_str = pipeline(num_inference_steps=2, output_type="np")
885
-
886
- assert pipeline.__class__.__name__ == "CustomLocalPipeline"
887
- assert images[0].shape == (1, 32, 32, 3)
888
- # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
889
- assert output_str == "This is a local test"
890
-
891
- def test_custom_model_and_pipeline(self):
892
- pipe = CustomPipeline(
893
- encoder=CustomEncoder(),
894
- scheduler=DDIMScheduler(),
895
- )
896
-
897
- with tempfile.TemporaryDirectory() as tmpdirname:
898
- pipe.save_pretrained(tmpdirname)
899
-
900
- pipe_new = CustomPipeline.from_pretrained(tmpdirname)
901
- pipe_new.save_pretrained(tmpdirname)
902
-
903
- conf_1 = dict(pipe.config)
904
- conf_2 = dict(pipe_new.config)
905
-
906
- del conf_2["_name_or_path"]
907
-
908
- assert conf_1 == conf_2
909
-
910
- @slow
911
- @require_torch_gpu
912
- def test_download_from_git(self):
913
- # Because adaptive_avg_pool2d_backward_cuda
914
- # does not have a deterministic implementation.
915
- clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
916
-
917
- feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
918
- clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
919
-
920
- pipeline = DiffusionPipeline.from_pretrained(
921
- "CompVis/stable-diffusion-v1-4",
922
- custom_pipeline="clip_guided_stable_diffusion",
923
- clip_model=clip_model,
924
- feature_extractor=feature_extractor,
925
- torch_dtype=torch.float16,
926
- )
927
- pipeline.enable_attention_slicing()
928
- pipeline = pipeline.to(torch_device)
929
-
930
- # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the pypi package of th e library, but solely on the community examples folder of GitHub under:
931
- # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py
932
- assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion"
933
-
934
- image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
935
- assert image.shape == (512, 512, 3)
936
-
937
- def test_save_pipeline_change_config(self):
938
- pipe = DiffusionPipeline.from_pretrained(
939
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
940
- )
941
-
942
- with tempfile.TemporaryDirectory() as tmpdirname:
943
- pipe.save_pretrained(tmpdirname)
944
- pipe = DiffusionPipeline.from_pretrained(tmpdirname)
945
-
946
- assert pipe.scheduler.__class__.__name__ == "PNDMScheduler"
947
-
948
- # let's make sure that changing the scheduler is correctly reflected
949
- with tempfile.TemporaryDirectory() as tmpdirname:
950
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
951
- pipe.save_pretrained(tmpdirname)
952
- pipe = DiffusionPipeline.from_pretrained(tmpdirname)
953
-
954
- assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler"
955
-
956
-
957
- class PipelineFastTests(unittest.TestCase):
958
- def tearDown(self):
959
- # clean up the VRAM after each test
960
- super().tearDown()
961
- gc.collect()
962
- torch.cuda.empty_cache()
963
-
964
- import diffusers
965
-
966
- diffusers.utils.import_utils._safetensors_available = True
967
-
968
- def dummy_image(self):
969
- batch_size = 1
970
- num_channels = 3
971
- sizes = (32, 32)
972
-
973
- image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
974
- return image
975
-
976
- def dummy_uncond_unet(self, sample_size=32):
977
- torch.manual_seed(0)
978
- model = UNet2DModel(
979
- block_out_channels=(32, 64),
980
- layers_per_block=2,
981
- sample_size=sample_size,
982
- in_channels=3,
983
- out_channels=3,
984
- down_block_types=("DownBlock2D", "AttnDownBlock2D"),
985
- up_block_types=("AttnUpBlock2D", "UpBlock2D"),
986
- )
987
- return model
988
-
989
- def dummy_cond_unet(self, sample_size=32):
990
- torch.manual_seed(0)
991
- model = UNet2DConditionModel(
992
- block_out_channels=(32, 64),
993
- layers_per_block=2,
994
- sample_size=sample_size,
995
- in_channels=4,
996
- out_channels=4,
997
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
998
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
999
- cross_attention_dim=32,
1000
- )
1001
- return model
1002
-
1003
- @property
1004
- def dummy_vae(self):
1005
- torch.manual_seed(0)
1006
- model = AutoencoderKL(
1007
- block_out_channels=[32, 64],
1008
- in_channels=3,
1009
- out_channels=3,
1010
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
1011
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
1012
- latent_channels=4,
1013
- )
1014
- return model
1015
-
1016
- @property
1017
- def dummy_text_encoder(self):
1018
- torch.manual_seed(0)
1019
- config = CLIPTextConfig(
1020
- bos_token_id=0,
1021
- eos_token_id=2,
1022
- hidden_size=32,
1023
- intermediate_size=37,
1024
- layer_norm_eps=1e-05,
1025
- num_attention_heads=4,
1026
- num_hidden_layers=5,
1027
- pad_token_id=1,
1028
- vocab_size=1000,
1029
- )
1030
- return CLIPTextModel(config)
1031
-
1032
- @property
1033
- def dummy_extractor(self):
1034
- def extract(*args, **kwargs):
1035
- class Out:
1036
- def __init__(self):
1037
- self.pixel_values = torch.ones([0])
1038
-
1039
- def to(self, device):
1040
- self.pixel_values.to(device)
1041
- return self
1042
-
1043
- return Out()
1044
-
1045
- return extract
1046
-
1047
- @parameterized.expand(
1048
- [
1049
- [DDIMScheduler, DDIMPipeline, 32],
1050
- [DDPMScheduler, DDPMPipeline, 32],
1051
- [DDIMScheduler, DDIMPipeline, (32, 64)],
1052
- [DDPMScheduler, DDPMPipeline, (64, 32)],
1053
- ]
1054
- )
1055
- def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32):
1056
- unet = self.dummy_uncond_unet(sample_size)
1057
- scheduler = scheduler_fn()
1058
- pipeline = pipeline_fn(unet, scheduler).to(torch_device)
1059
-
1060
- generator = torch.manual_seed(0)
1061
- out_image = pipeline(
1062
- generator=generator,
1063
- num_inference_steps=2,
1064
- output_type="np",
1065
- ).images
1066
- sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size
1067
- assert out_image.shape == (1, *sample_size, 3)
1068
-
1069
- def test_stable_diffusion_components(self):
1070
- """Test that components property works correctly"""
1071
- unet = self.dummy_cond_unet()
1072
- scheduler = PNDMScheduler(skip_prk_steps=True)
1073
- vae = self.dummy_vae
1074
- bert = self.dummy_text_encoder
1075
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1076
-
1077
- image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0]
1078
- init_image = Image.fromarray(np.uint8(image)).convert("RGB")
1079
- mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32))
1080
-
1081
- # make sure here that pndm scheduler skips prk
1082
- inpaint = StableDiffusionInpaintPipelineLegacy(
1083
- unet=unet,
1084
- scheduler=scheduler,
1085
- vae=vae,
1086
- text_encoder=bert,
1087
- tokenizer=tokenizer,
1088
- safety_checker=None,
1089
- feature_extractor=self.dummy_extractor,
1090
- ).to(torch_device)
1091
- img2img = StableDiffusionImg2ImgPipeline(**inpaint.components).to(torch_device)
1092
- text2img = StableDiffusionPipeline(**inpaint.components).to(torch_device)
1093
-
1094
- prompt = "A painting of a squirrel eating a burger"
1095
-
1096
- generator = torch.manual_seed(0)
1097
- image_inpaint = inpaint(
1098
- [prompt],
1099
- generator=generator,
1100
- num_inference_steps=2,
1101
- output_type="np",
1102
- image=init_image,
1103
- mask_image=mask_image,
1104
- ).images
1105
- image_img2img = img2img(
1106
- [prompt],
1107
- generator=generator,
1108
- num_inference_steps=2,
1109
- output_type="np",
1110
- image=init_image,
1111
- ).images
1112
- image_text2img = text2img(
1113
- [prompt],
1114
- generator=generator,
1115
- num_inference_steps=2,
1116
- output_type="np",
1117
- ).images
1118
-
1119
- assert image_inpaint.shape == (1, 32, 32, 3)
1120
- assert image_img2img.shape == (1, 32, 32, 3)
1121
- assert image_text2img.shape == (1, 64, 64, 3)
1122
-
1123
- @require_torch_gpu
1124
- def test_pipe_false_offload_warn(self):
1125
- unet = self.dummy_cond_unet()
1126
- scheduler = PNDMScheduler(skip_prk_steps=True)
1127
- vae = self.dummy_vae
1128
- bert = self.dummy_text_encoder
1129
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1130
-
1131
- sd = StableDiffusionPipeline(
1132
- unet=unet,
1133
- scheduler=scheduler,
1134
- vae=vae,
1135
- text_encoder=bert,
1136
- tokenizer=tokenizer,
1137
- safety_checker=None,
1138
- feature_extractor=self.dummy_extractor,
1139
- )
1140
-
1141
- sd.enable_model_cpu_offload()
1142
-
1143
- logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
1144
- with CaptureLogger(logger) as cap_logger:
1145
- sd.to("cuda")
1146
-
1147
- assert "It is strongly recommended against doing so" in str(cap_logger)
1148
-
1149
- sd = StableDiffusionPipeline(
1150
- unet=unet,
1151
- scheduler=scheduler,
1152
- vae=vae,
1153
- text_encoder=bert,
1154
- tokenizer=tokenizer,
1155
- safety_checker=None,
1156
- feature_extractor=self.dummy_extractor,
1157
- )
1158
-
1159
- def test_set_scheduler(self):
1160
- unet = self.dummy_cond_unet()
1161
- scheduler = PNDMScheduler(skip_prk_steps=True)
1162
- vae = self.dummy_vae
1163
- bert = self.dummy_text_encoder
1164
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1165
-
1166
- sd = StableDiffusionPipeline(
1167
- unet=unet,
1168
- scheduler=scheduler,
1169
- vae=vae,
1170
- text_encoder=bert,
1171
- tokenizer=tokenizer,
1172
- safety_checker=None,
1173
- feature_extractor=self.dummy_extractor,
1174
- )
1175
-
1176
- sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
1177
- assert isinstance(sd.scheduler, DDIMScheduler)
1178
- sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config)
1179
- assert isinstance(sd.scheduler, DDPMScheduler)
1180
- sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
1181
- assert isinstance(sd.scheduler, PNDMScheduler)
1182
- sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config)
1183
- assert isinstance(sd.scheduler, LMSDiscreteScheduler)
1184
- sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config)
1185
- assert isinstance(sd.scheduler, EulerDiscreteScheduler)
1186
- sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config)
1187
- assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler)
1188
- sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config)
1189
- assert isinstance(sd.scheduler, DPMSolverMultistepScheduler)
1190
-
1191
- def test_set_component_to_none(self):
1192
- unet = self.dummy_cond_unet()
1193
- scheduler = PNDMScheduler(skip_prk_steps=True)
1194
- vae = self.dummy_vae
1195
- bert = self.dummy_text_encoder
1196
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1197
-
1198
- pipeline = StableDiffusionPipeline(
1199
- unet=unet,
1200
- scheduler=scheduler,
1201
- vae=vae,
1202
- text_encoder=bert,
1203
- tokenizer=tokenizer,
1204
- safety_checker=None,
1205
- feature_extractor=self.dummy_extractor,
1206
- )
1207
-
1208
- generator = torch.Generator(device="cpu").manual_seed(0)
1209
-
1210
- prompt = "This is a flower"
1211
-
1212
- out_image = pipeline(
1213
- prompt=prompt,
1214
- generator=generator,
1215
- num_inference_steps=1,
1216
- output_type="np",
1217
- ).images
1218
-
1219
- pipeline.feature_extractor = None
1220
- generator = torch.Generator(device="cpu").manual_seed(0)
1221
- out_image_2 = pipeline(
1222
- prompt=prompt,
1223
- generator=generator,
1224
- num_inference_steps=1,
1225
- output_type="np",
1226
- ).images
1227
-
1228
- assert out_image.shape == (1, 64, 64, 3)
1229
- assert np.abs(out_image - out_image_2).max() < 1e-3
1230
-
1231
- def test_set_scheduler_consistency(self):
1232
- unet = self.dummy_cond_unet()
1233
- pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
1234
- ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
1235
- vae = self.dummy_vae
1236
- bert = self.dummy_text_encoder
1237
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1238
-
1239
- sd = StableDiffusionPipeline(
1240
- unet=unet,
1241
- scheduler=pndm,
1242
- vae=vae,
1243
- text_encoder=bert,
1244
- tokenizer=tokenizer,
1245
- safety_checker=None,
1246
- feature_extractor=self.dummy_extractor,
1247
- )
1248
-
1249
- pndm_config = sd.scheduler.config
1250
- sd.scheduler = DDPMScheduler.from_config(pndm_config)
1251
- sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
1252
- pndm_config_2 = sd.scheduler.config
1253
- pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config}
1254
-
1255
- assert dict(pndm_config) == dict(pndm_config_2)
1256
-
1257
- sd = StableDiffusionPipeline(
1258
- unet=unet,
1259
- scheduler=ddim,
1260
- vae=vae,
1261
- text_encoder=bert,
1262
- tokenizer=tokenizer,
1263
- safety_checker=None,
1264
- feature_extractor=self.dummy_extractor,
1265
- )
1266
-
1267
- ddim_config = sd.scheduler.config
1268
- sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config)
1269
- sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
1270
- ddim_config_2 = sd.scheduler.config
1271
- ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config}
1272
-
1273
- assert dict(ddim_config) == dict(ddim_config_2)
1274
-
1275
- def test_save_safe_serialization(self):
1276
- pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
1277
- with tempfile.TemporaryDirectory() as tmpdirname:
1278
- pipeline.save_pretrained(tmpdirname, safe_serialization=True)
1279
-
1280
- # Validate that the VAE safetensor exists and are of the correct format
1281
- vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors")
1282
- assert os.path.exists(vae_path), f"Could not find {vae_path}"
1283
- _ = safetensors.torch.load_file(vae_path)
1284
-
1285
- # Validate that the UNet safetensor exists and are of the correct format
1286
- unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors")
1287
- assert os.path.exists(unet_path), f"Could not find {unet_path}"
1288
- _ = safetensors.torch.load_file(unet_path)
1289
-
1290
- # Validate that the text encoder safetensor exists and are of the correct format
1291
- text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors")
1292
- assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}"
1293
- _ = safetensors.torch.load_file(text_encoder_path)
1294
-
1295
- pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname)
1296
- assert pipeline.unet is not None
1297
- assert pipeline.vae is not None
1298
- assert pipeline.text_encoder is not None
1299
- assert pipeline.scheduler is not None
1300
- assert pipeline.feature_extractor is not None
1301
-
1302
- def test_no_pytorch_download_when_doing_safetensors(self):
1303
- # by default we don't download
1304
- with tempfile.TemporaryDirectory() as tmpdirname:
1305
- _ = StableDiffusionPipeline.from_pretrained(
1306
- "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
1307
- )
1308
-
1309
- path = os.path.join(
1310
- tmpdirname,
1311
- "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
1312
- "snapshots",
1313
- "07838d72e12f9bcec1375b0482b80c1d399be843",
1314
- "unet",
1315
- )
1316
- # safetensors exists
1317
- assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
1318
- # pytorch does not
1319
- assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))
1320
-
1321
- def test_no_safetensors_download_when_doing_pytorch(self):
1322
- # mock diffusers safetensors not available
1323
- import diffusers
1324
-
1325
- diffusers.utils.import_utils._safetensors_available = False
1326
-
1327
- with tempfile.TemporaryDirectory() as tmpdirname:
1328
- _ = StableDiffusionPipeline.from_pretrained(
1329
- "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
1330
- )
1331
-
1332
- path = os.path.join(
1333
- tmpdirname,
1334
- "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
1335
- "snapshots",
1336
- "07838d72e12f9bcec1375b0482b80c1d399be843",
1337
- "unet",
1338
- )
1339
- # safetensors does not exists
1340
- assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
1341
- # pytorch does
1342
- assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))
1343
-
1344
- diffusers.utils.import_utils._safetensors_available = True
1345
-
1346
- def test_optional_components(self):
1347
- unet = self.dummy_cond_unet()
1348
- pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
1349
- vae = self.dummy_vae
1350
- bert = self.dummy_text_encoder
1351
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
1352
-
1353
- orig_sd = StableDiffusionPipeline(
1354
- unet=unet,
1355
- scheduler=pndm,
1356
- vae=vae,
1357
- text_encoder=bert,
1358
- tokenizer=tokenizer,
1359
- safety_checker=unet,
1360
- feature_extractor=self.dummy_extractor,
1361
- )
1362
- sd = orig_sd
1363
-
1364
- assert sd.config.requires_safety_checker is True
1365
-
1366
- with tempfile.TemporaryDirectory() as tmpdirname:
1367
- sd.save_pretrained(tmpdirname)
1368
-
1369
- # Test that passing None works
1370
- sd = StableDiffusionPipeline.from_pretrained(
1371
- tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False
1372
- )
1373
-
1374
- assert sd.config.requires_safety_checker is False
1375
- assert sd.config.safety_checker == (None, None)
1376
- assert sd.config.feature_extractor == (None, None)
1377
-
1378
- with tempfile.TemporaryDirectory() as tmpdirname:
1379
- sd.save_pretrained(tmpdirname)
1380
-
1381
- # Test that loading previous None works
1382
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname)
1383
-
1384
- assert sd.config.requires_safety_checker is False
1385
- assert sd.config.safety_checker == (None, None)
1386
- assert sd.config.feature_extractor == (None, None)
1387
-
1388
- orig_sd.save_pretrained(tmpdirname)
1389
-
1390
- # Test that loading without any directory works
1391
- shutil.rmtree(os.path.join(tmpdirname, "safety_checker"))
1392
- with open(os.path.join(tmpdirname, sd.config_name)) as f:
1393
- config = json.load(f)
1394
- config["safety_checker"] = [None, None]
1395
- with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
1396
- json.dump(config, f)
1397
-
1398
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False)
1399
- sd.save_pretrained(tmpdirname)
1400
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname)
1401
-
1402
- assert sd.config.requires_safety_checker is False
1403
- assert sd.config.safety_checker == (None, None)
1404
- assert sd.config.feature_extractor == (None, None)
1405
-
1406
- # Test that loading from deleted model index works
1407
- with open(os.path.join(tmpdirname, sd.config_name)) as f:
1408
- config = json.load(f)
1409
- del config["safety_checker"]
1410
- del config["feature_extractor"]
1411
- with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
1412
- json.dump(config, f)
1413
-
1414
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname)
1415
-
1416
- assert sd.config.requires_safety_checker is False
1417
- assert sd.config.safety_checker == (None, None)
1418
- assert sd.config.feature_extractor == (None, None)
1419
-
1420
- with tempfile.TemporaryDirectory() as tmpdirname:
1421
- sd.save_pretrained(tmpdirname)
1422
-
1423
- # Test that partially loading works
1424
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)
1425
-
1426
- assert sd.config.requires_safety_checker is False
1427
- assert sd.config.safety_checker == (None, None)
1428
- assert sd.config.feature_extractor != (None, None)
1429
-
1430
- # Test that partially loading works
1431
- sd = StableDiffusionPipeline.from_pretrained(
1432
- tmpdirname,
1433
- feature_extractor=self.dummy_extractor,
1434
- safety_checker=unet,
1435
- requires_safety_checker=[True, True],
1436
- )
1437
-
1438
- assert sd.config.requires_safety_checker == [True, True]
1439
- assert sd.config.safety_checker != (None, None)
1440
- assert sd.config.feature_extractor != (None, None)
1441
-
1442
- with tempfile.TemporaryDirectory() as tmpdirname:
1443
- sd.save_pretrained(tmpdirname)
1444
- sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)
1445
-
1446
- assert sd.config.requires_safety_checker == [True, True]
1447
- assert sd.config.safety_checker != (None, None)
1448
- assert sd.config.feature_extractor != (None, None)
1449
-
1450
- def test_name_or_path(self):
1451
- model_path = "hf-internal-testing/tiny-stable-diffusion-torch"
1452
- sd = DiffusionPipeline.from_pretrained(model_path)
1453
-
1454
- assert sd.name_or_path == model_path
1455
-
1456
- with tempfile.TemporaryDirectory() as tmpdirname:
1457
- sd.save_pretrained(tmpdirname)
1458
- sd = DiffusionPipeline.from_pretrained(tmpdirname)
1459
-
1460
- assert sd.name_or_path == tmpdirname
1461
-
1462
- def test_warning_no_variant_available(self):
1463
- variant = "fp16"
1464
- with self.assertWarns(FutureWarning) as warning_context:
1465
- cached_folder = StableDiffusionPipeline.download(
1466
- "hf-internal-testing/diffusers-stable-diffusion-tiny-all", variant=variant
1467
- )
1468
-
1469
- assert "but no such modeling files are available" in str(warning_context.warning)
1470
- assert variant in str(warning_context.warning)
1471
-
1472
- def get_all_filenames(directory):
1473
- filenames = glob.glob(directory + "/**", recursive=True)
1474
- filenames = [f for f in filenames if os.path.isfile(f)]
1475
- return filenames
1476
-
1477
- filenames = get_all_filenames(str(cached_folder))
1478
-
1479
- all_model_files, variant_model_files = variant_compatible_siblings(filenames, variant=variant)
1480
-
1481
- # make sure that none of the model names are variant model names
1482
- assert len(variant_model_files) == 0
1483
- assert len(all_model_files) > 0
1484
-
1485
-
1486
- @slow
1487
- @require_torch_gpu
1488
- class PipelineSlowTests(unittest.TestCase):
1489
- def tearDown(self):
1490
- # clean up the VRAM after each test
1491
- super().tearDown()
1492
- gc.collect()
1493
- torch.cuda.empty_cache()
1494
-
1495
- def test_smart_download(self):
1496
- model_id = "hf-internal-testing/unet-pipeline-dummy"
1497
- with tempfile.TemporaryDirectory() as tmpdirname:
1498
- _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
1499
- local_repo_name = "--".join(["models"] + model_id.split("/"))
1500
- snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
1501
- snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])
1502
-
1503
- # inspect all downloaded files to make sure that everything is included
1504
- assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
1505
- assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
1506
- assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
1507
- assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
1508
- assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
1509
- assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
1510
- assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
1511
- # let's make sure the super large numpy file:
1512
- # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
1513
- # is not downloaded, but all the expected ones
1514
- assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))
1515
-
1516
- def test_warning_unused_kwargs(self):
1517
- model_id = "hf-internal-testing/unet-pipeline-dummy"
1518
- logger = logging.get_logger("diffusers.pipelines")
1519
- with tempfile.TemporaryDirectory() as tmpdirname:
1520
- with CaptureLogger(logger) as cap_logger:
1521
- DiffusionPipeline.from_pretrained(
1522
- model_id,
1523
- not_used=True,
1524
- cache_dir=tmpdirname,
1525
- force_download=True,
1526
- )
1527
-
1528
- assert (
1529
- cap_logger.out.strip().split("\n")[-1]
1530
- == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored."
1531
- )
1532
-
1533
- def test_from_save_pretrained(self):
1534
- # 1. Load models
1535
- model = UNet2DModel(
1536
- block_out_channels=(32, 64),
1537
- layers_per_block=2,
1538
- sample_size=32,
1539
- in_channels=3,
1540
- out_channels=3,
1541
- down_block_types=("DownBlock2D", "AttnDownBlock2D"),
1542
- up_block_types=("AttnUpBlock2D", "UpBlock2D"),
1543
- )
1544
- scheduler = DDPMScheduler(num_train_timesteps=10)
1545
-
1546
- ddpm = DDPMPipeline(model, scheduler)
1547
- ddpm.to(torch_device)
1548
- ddpm.set_progress_bar_config(disable=None)
1549
-
1550
- with tempfile.TemporaryDirectory() as tmpdirname:
1551
- ddpm.save_pretrained(tmpdirname)
1552
- new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
1553
- new_ddpm.to(torch_device)
1554
-
1555
- generator = torch.Generator(device=torch_device).manual_seed(0)
1556
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
1557
-
1558
- generator = torch.Generator(device=torch_device).manual_seed(0)
1559
- new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
1560
-
1561
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
1562
-
1563
- @require_torch_2
1564
- def test_from_save_pretrained_dynamo(self):
1565
- run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None)
1566
-
1567
- def test_from_pretrained_hub(self):
1568
- model_path = "google/ddpm-cifar10-32"
1569
-
1570
- scheduler = DDPMScheduler(num_train_timesteps=10)
1571
-
1572
- ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
1573
- ddpm = ddpm.to(torch_device)
1574
- ddpm.set_progress_bar_config(disable=None)
1575
-
1576
- ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
1577
- ddpm_from_hub = ddpm_from_hub.to(torch_device)
1578
- ddpm_from_hub.set_progress_bar_config(disable=None)
1579
-
1580
- generator = torch.Generator(device=torch_device).manual_seed(0)
1581
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
1582
-
1583
- generator = torch.Generator(device=torch_device).manual_seed(0)
1584
- new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
1585
-
1586
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
1587
-
1588
- def test_from_pretrained_hub_pass_model(self):
1589
- model_path = "google/ddpm-cifar10-32"
1590
-
1591
- scheduler = DDPMScheduler(num_train_timesteps=10)
1592
-
1593
- # pass unet into DiffusionPipeline
1594
- unet = UNet2DModel.from_pretrained(model_path)
1595
- ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
1596
- ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device)
1597
- ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
1598
-
1599
- ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
1600
- ddpm_from_hub = ddpm_from_hub.to(torch_device)
1601
- ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
1602
-
1603
- generator = torch.Generator(device=torch_device).manual_seed(0)
1604
- image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images
1605
-
1606
- generator = torch.Generator(device=torch_device).manual_seed(0)
1607
- new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
1608
-
1609
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
1610
-
1611
- def test_output_format(self):
1612
- model_path = "google/ddpm-cifar10-32"
1613
-
1614
- scheduler = DDIMScheduler.from_pretrained(model_path)
1615
- pipe = DDIMPipeline.from_pretrained(model_path, scheduler=scheduler)
1616
- pipe.to(torch_device)
1617
- pipe.set_progress_bar_config(disable=None)
1618
-
1619
- images = pipe(output_type="numpy").images
1620
- assert images.shape == (1, 32, 32, 3)
1621
- assert isinstance(images, np.ndarray)
1622
-
1623
- images = pipe(output_type="pil", num_inference_steps=4).images
1624
- assert isinstance(images, list)
1625
- assert len(images) == 1
1626
- assert isinstance(images[0], PIL.Image.Image)
1627
-
1628
- # use PIL by default
1629
- images = pipe(num_inference_steps=4).images
1630
- assert isinstance(images, list)
1631
- assert isinstance(images[0], PIL.Image.Image)
1632
-
1633
- @require_flax
1634
- def test_from_flax_from_pt(self):
1635
- pipe_pt = StableDiffusionPipeline.from_pretrained(
1636
- "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
1637
- )
1638
- pipe_pt.to(torch_device)
1639
-
1640
- from diffusers import FlaxStableDiffusionPipeline
1641
-
1642
- with tempfile.TemporaryDirectory() as tmpdirname:
1643
- pipe_pt.save_pretrained(tmpdirname)
1644
-
1645
- pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained(
1646
- tmpdirname, safety_checker=None, from_pt=True
1647
- )
1648
-
1649
- with tempfile.TemporaryDirectory() as tmpdirname:
1650
- pipe_flax.save_pretrained(tmpdirname, params=params)
1651
- pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True)
1652
- pipe_pt_2.to(torch_device)
1653
-
1654
- prompt = "Hello"
1655
-
1656
- generator = torch.manual_seed(0)
1657
- image_0 = pipe_pt(
1658
- [prompt],
1659
- generator=generator,
1660
- num_inference_steps=2,
1661
- output_type="np",
1662
- ).images[0]
1663
-
1664
- generator = torch.manual_seed(0)
1665
- image_1 = pipe_pt_2(
1666
- [prompt],
1667
- generator=generator,
1668
- num_inference_steps=2,
1669
- output_type="np",
1670
- ).images[0]
1671
-
1672
- assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass"
1673
-
1674
- @require_compel
1675
- def test_weighted_prompts_compel(self):
1676
- from compel import Compel
1677
-
1678
- pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
1679
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
1680
- pipe.enable_model_cpu_offload()
1681
- pipe.enable_attention_slicing()
1682
-
1683
- compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
1684
-
1685
- prompt = "a red cat playing with a ball{}"
1686
-
1687
- prompts = [prompt.format(s) for s in ["", "++", "--"]]
1688
-
1689
- prompt_embeds = compel(prompts)
1690
-
1691
- generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]
1692
-
1693
- images = pipe(
1694
- prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy"
1695
- ).images
1696
-
1697
- for i, image in enumerate(images):
1698
- expected_image = load_numpy(
1699
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
1700
- f"/compel/forest_{i}.npy"
1701
- )
1702
-
1703
- assert np.abs(image - expected_image).max() < 3e-1
1704
-
1705
-
1706
- @nightly
1707
- @require_torch_gpu
1708
- class PipelineNightlyTests(unittest.TestCase):
1709
- def tearDown(self):
1710
- # clean up the VRAM after each test
1711
- super().tearDown()
1712
- gc.collect()
1713
- torch.cuda.empty_cache()
1714
-
1715
- def test_ddpm_ddim_equality_batched(self):
1716
- seed = 0
1717
- model_id = "google/ddpm-cifar10-32"
1718
-
1719
- unet = UNet2DModel.from_pretrained(model_id)
1720
- ddpm_scheduler = DDPMScheduler()
1721
- ddim_scheduler = DDIMScheduler()
1722
-
1723
- ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
1724
- ddpm.to(torch_device)
1725
- ddpm.set_progress_bar_config(disable=None)
1726
-
1727
- ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
1728
- ddim.to(torch_device)
1729
- ddim.set_progress_bar_config(disable=None)
1730
-
1731
- generator = torch.Generator(device=torch_device).manual_seed(seed)
1732
- ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images
1733
-
1734
- generator = torch.Generator(device=torch_device).manual_seed(seed)
1735
- ddim_images = ddim(
1736
- batch_size=2,
1737
- generator=generator,
1738
- num_inference_steps=1000,
1739
- eta=1.0,
1740
- output_type="numpy",
1741
- use_clipped_model_output=True, # Need this to make DDIM match DDPM
1742
- ).images
1743
-
1744
- # the values aren't exactly equal, but the images look the same visually
1745
- assert np.abs(ddpm_images - ddim_images).max() < 1e-1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/mask/mask_target.py DELETED
@@ -1,122 +0,0 @@
1
- import numpy as np
2
- import torch
3
- from torch.nn.modules.utils import _pair
4
-
5
-
6
- def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
7
- cfg):
8
- """Compute mask target for positive proposals in multiple images.
9
-
10
- Args:
11
- pos_proposals_list (list[Tensor]): Positive proposals in multiple
12
- images.
13
- pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
14
- positive proposals.
15
- gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
16
- each image.
17
- cfg (dict): Config dict that specifies the mask size.
18
-
19
- Returns:
20
- list[Tensor]: Mask target of each image.
21
-
22
- Example:
23
- >>> import mmcv
24
- >>> import mmdet
25
- >>> from mmdet.core.mask import BitmapMasks
26
- >>> from mmdet.core.mask.mask_target import *
27
- >>> H, W = 17, 18
28
- >>> cfg = mmcv.Config({'mask_size': (13, 14)})
29
- >>> rng = np.random.RandomState(0)
30
- >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
31
- >>> pos_proposals_list = [
32
- >>> torch.Tensor([
33
- >>> [ 7.2425, 5.5929, 13.9414, 14.9541],
34
- >>> [ 7.3241, 3.6170, 16.3850, 15.3102],
35
- >>> ]),
36
- >>> torch.Tensor([
37
- >>> [ 4.8448, 6.4010, 7.0314, 9.7681],
38
- >>> [ 5.9790, 2.6989, 7.4416, 4.8580],
39
- >>> [ 0.0000, 0.0000, 0.1398, 9.8232],
40
- >>> ]),
41
- >>> ]
42
- >>> # Corresponding class index for each proposal for each image
43
- >>> pos_assigned_gt_inds_list = [
44
- >>> torch.LongTensor([7, 0]),
45
- >>> torch.LongTensor([5, 4, 1]),
46
- >>> ]
47
- >>> # Ground truth mask for each true object for each image
48
- >>> gt_masks_list = [
49
- >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
50
- >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
51
- >>> ]
52
- >>> mask_targets = mask_target(
53
- >>> pos_proposals_list, pos_assigned_gt_inds_list,
54
- >>> gt_masks_list, cfg)
55
- >>> assert mask_targets.shape == (5,) + cfg['mask_size']
56
- """
57
- cfg_list = [cfg for _ in range(len(pos_proposals_list))]
58
- mask_targets = map(mask_target_single, pos_proposals_list,
59
- pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
60
- mask_targets = list(mask_targets)
61
- if len(mask_targets) > 0:
62
- mask_targets = torch.cat(mask_targets)
63
- return mask_targets
64
-
65
-
66
- def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
67
- """Compute mask target for each positive proposal in the image.
68
-
69
- Args:
70
- pos_proposals (Tensor): Positive proposals.
71
- pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
72
- gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
73
- or Polygon.
74
- cfg (dict): Config dict that indicate the mask size.
75
-
76
- Returns:
77
- Tensor: Mask target of each positive proposals in the image.
78
-
79
- Example:
80
- >>> import mmcv
81
- >>> import mmdet
82
- >>> from mmdet.core.mask import BitmapMasks
83
- >>> from mmdet.core.mask.mask_target import * # NOQA
84
- >>> H, W = 32, 32
85
- >>> cfg = mmcv.Config({'mask_size': (7, 11)})
86
- >>> rng = np.random.RandomState(0)
87
- >>> # Masks for each ground truth box (relative to the image)
88
- >>> gt_masks_data = rng.rand(3, H, W)
89
- >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
90
- >>> # Predicted positive boxes in one image
91
- >>> pos_proposals = torch.FloatTensor([
92
- >>> [ 16.2, 5.5, 19.9, 20.9],
93
- >>> [ 17.3, 13.6, 19.3, 19.3],
94
- >>> [ 14.8, 16.4, 17.0, 23.7],
95
- >>> [ 0.0, 0.0, 16.0, 16.0],
96
- >>> [ 4.0, 0.0, 20.0, 16.0],
97
- >>> ])
98
- >>> # For each predicted proposal, its assignment to a gt mask
99
- >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
100
- >>> mask_targets = mask_target_single(
101
- >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
102
- >>> assert mask_targets.shape == (5,) + cfg['mask_size']
103
- """
104
- device = pos_proposals.device
105
- mask_size = _pair(cfg.mask_size)
106
- num_pos = pos_proposals.size(0)
107
- if num_pos > 0:
108
- proposals_np = pos_proposals.cpu().numpy()
109
- maxh, maxw = gt_masks.height, gt_masks.width
110
- proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
111
- proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
112
- pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
113
-
114
- mask_targets = gt_masks.crop_and_resize(
115
- proposals_np, mask_size, device=device,
116
- inds=pos_assigned_gt_inds).to_ndarray()
117
-
118
- mask_targets = torch.from_numpy(mask_targets).float().to(device)
119
- else:
120
- mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
121
-
122
- return mask_targets
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/__init__.py DELETED
@@ -1,42 +0,0 @@
1
- # Midas Depth Estimation
2
- # From https://github.com/isl-org/MiDaS
3
- # MIT LICENSE
4
-
5
- import cv2
6
- import numpy as np
7
- import torch
8
-
9
- from einops import rearrange
10
- from .api import MiDaSInference
11
-
12
-
13
- class MidasDetector:
14
- def __init__(self):
15
- self.model = MiDaSInference(model_type="dpt_hybrid").cuda()
16
-
17
- def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1):
18
- assert input_image.ndim == 3
19
- image_depth = input_image
20
- with torch.no_grad():
21
- image_depth = torch.from_numpy(image_depth).float().cuda()
22
- image_depth = image_depth / 127.5 - 1.0
23
- image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
24
- depth = self.model(image_depth)[0]
25
-
26
- depth_pt = depth.clone()
27
- depth_pt -= torch.min(depth_pt)
28
- depth_pt /= torch.max(depth_pt)
29
- depth_pt = depth_pt.cpu().numpy()
30
- depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
31
-
32
- depth_np = depth.cpu().numpy()
33
- x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
34
- y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
35
- z = np.ones_like(x) * a
36
- x[depth_pt < bg_th] = 0
37
- y[depth_pt < bg_th] = 0
38
- normal = np.stack([x, y, z], axis=2)
39
- normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
40
- normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
41
-
42
- return depth_image, normal_image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arnx/MusicGenXvAKN/audiocraft/modules/streaming.py DELETED
@@ -1,135 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Streaming module API that should be implemented by all Streaming components,
9
- """
10
-
11
- from contextlib import contextmanager
12
- import typing as tp
13
- from torch import nn
14
- import torch
15
-
16
-
17
- State = tp.Dict[str, torch.Tensor]
18
-
19
-
20
- class StreamingModule(nn.Module):
21
- """Common API for streaming components.
22
-
23
- Each streaming component has a streaming state, which is just a dict[str, Tensor].
24
- By convention, the first dim of each tensor must be the batch size.
25
- Don't use dots in the key names, as this would clash with submodules
26
- (like in state_dict).
27
-
28
- If `self._is_streaming` is True, the component should use and remember
29
- the proper state inside `self._streaming_state`.
30
-
31
- To set a streaming component in streaming state, use
32
-
33
- with module.streaming():
34
- ...
35
-
36
- This will automatically reset the streaming state when exiting the context manager.
37
- This also automatically propagates to all streaming children module.
38
-
39
- Some module might also implement the `StreamingModule.flush` method, although
40
- this one is trickier, as all parents module must be StreamingModule and implement
41
- it as well for it to work properly. See `StreamingSequential` after.
42
- """
43
- def __init__(self) -> None:
44
- super().__init__()
45
- self._streaming_state: State = {}
46
- self._is_streaming = False
47
-
48
- def _apply_named_streaming(self, fn: tp.Any):
49
- for name, module in self.named_modules():
50
- if isinstance(module, StreamingModule):
51
- fn(name, module)
52
-
53
- def _set_streaming(self, streaming: bool):
54
- def _set_streaming(name, module):
55
- module._is_streaming = streaming
56
- self._apply_named_streaming(_set_streaming)
57
-
58
- @contextmanager
59
- def streaming(self):
60
- """Context manager to enter streaming mode. Reset streaming state on exit.
61
- """
62
- self._set_streaming(True)
63
- try:
64
- yield
65
- finally:
66
- self._set_streaming(False)
67
- self.reset_streaming()
68
-
69
- def reset_streaming(self):
70
- """Reset the streaming state.
71
- """
72
- def _reset(name: str, module: StreamingModule):
73
- module._streaming_state.clear()
74
-
75
- self._apply_named_streaming(_reset)
76
-
77
- def get_streaming_state(self) -> State:
78
- """Return the streaming state, including that of sub-modules.
79
- """
80
- state: State = {}
81
-
82
- def _add(name: str, module: StreamingModule):
83
- if name:
84
- name += "."
85
- for key, value in module._streaming_state.items():
86
- state[name + key] = value
87
-
88
- self._apply_named_streaming(_add)
89
- return state
90
-
91
- def set_streaming_state(self, state: State):
92
- """Set the streaming state, including that of sub-modules.
93
- """
94
- state = dict(state)
95
-
96
- def _set(name: str, module: StreamingModule):
97
- if name:
98
- name += "."
99
- module._streaming_state.clear()
100
- for key, value in list(state.items()):
101
- # complexity is not ideal here, but probably fine.
102
- if key.startswith(name):
103
- local_key = key[len(name):]
104
- if '.' not in local_key:
105
- module._streaming_state[local_key] = value
106
- del state[key]
107
-
108
- self._apply_named_streaming(_set)
109
- assert len(state) == 0, list(state.keys())
110
-
111
- def flush(self, x: tp.Optional[torch.Tensor] = None):
112
- """Flush any remaining outputs that were waiting for completion.
113
- Typically, for convolutions, this will add the final padding
114
- and process the last buffer.
115
-
116
- This should take an optional argument `x`, which will be provided
117
- if a module before this one in the streaming pipeline has already
118
- spitted out a flushed out buffer.
119
- """
120
- if x is None:
121
- return None
122
- else:
123
- return self(x)
124
-
125
-
126
- class StreamingSequential(StreamingModule, nn.Sequential):
127
- """A streaming compatible alternative of `nn.Sequential`.
128
- """
129
- def flush(self, x: tp.Optional[torch.Tensor] = None):
130
- for module in self:
131
- if isinstance(module, StreamingModule):
132
- x = module.flush(x)
133
- elif x is not None:
134
- x = module(x)
135
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ArtyomKhyan/Detection/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Detection
3
- emoji: 🏢
4
- colorFrom: purple
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.50.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Avin1221/darkstorm2150-Protogen_x3.4_Official_Release/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/darkstorm2150/Protogen_x3.4_Official_Release").launch()
 
 
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/data_utils.py DELETED
@@ -1,328 +0,0 @@
1
- import time
2
- import os
3
- import random
4
- import numpy as np
5
- import torch
6
- import torch.utils.data
7
- import commons
8
- from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch
9
- from utils import load_wav_to_torch, load_filepaths_and_text
10
- from text import cleaned_text_to_sequence, get_bert
11
-
12
- """Multi speaker version"""
13
-
14
-
15
- class TextAudioSpeakerLoader(torch.utils.data.Dataset):
16
- """
17
- 1) loads audio, speaker_id, text pairs
18
- 2) normalizes text and converts them to sequences of integers
19
- 3) computes spectrograms from audio files.
20
- """
21
-
22
- def __init__(self, audiopaths_sid_text, hparams):
23
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
24
- self.max_wav_value = hparams.max_wav_value
25
- self.sampling_rate = hparams.sampling_rate
26
- self.filter_length = hparams.filter_length
27
- self.hop_length = hparams.hop_length
28
- self.win_length = hparams.win_length
29
- self.sampling_rate = hparams.sampling_rate
30
- self.spk_map = hparams.spk2id
31
- self.hparams = hparams
32
-
33
- self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False)
34
- if self.use_mel_spec_posterior:
35
- self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
36
-
37
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
38
-
39
- self.add_blank = hparams.add_blank
40
- self.min_text_len = getattr(hparams, "min_text_len", 1)
41
- self.max_text_len = getattr(hparams, "max_text_len", 300)
42
-
43
- random.seed(1234)
44
- random.shuffle(self.audiopaths_sid_text)
45
- self._filter()
46
-
47
- def _filter(self):
48
- """
49
- Filter text & store spec lengths
50
- """
51
- # Store spectrogram lengths for Bucketing
52
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
53
- # spec_length = wav_length // hop_length
54
-
55
- audiopaths_sid_text_new = []
56
- lengths = []
57
- skipped = 0
58
- for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:
59
- audiopath = f'{_id}'
60
- if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
61
- phones = phones.split(" ")
62
- tone = [int(i) for i in tone.split(" ")]
63
- word2ph = [int(i) for i in word2ph.split(" ")]
64
- audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])
65
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
66
- else:
67
- skipped += 1
68
- print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text))
69
- self.audiopaths_sid_text = audiopaths_sid_text_new
70
- self.lengths = lengths
71
-
72
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
73
- # separate filename, speaker_id and text
74
- audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
75
-
76
- bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)
77
-
78
- spec, wav = self.get_audio(audiopath)
79
- sid = torch.LongTensor([int(self.spk_map[sid])])
80
- return (phones, spec, wav, sid, tone, language, bert)
81
-
82
- def get_audio(self, filename):
83
- audio, sampling_rate = load_wav_to_torch(filename)
84
- if sampling_rate != self.sampling_rate:
85
- raise ValueError("{} {} SR doesn't match target {} SR".format(
86
- sampling_rate, self.sampling_rate))
87
- audio_norm = audio / self.max_wav_value
88
- audio_norm = audio_norm.unsqueeze(0)
89
- spec_filename = filename.replace(".wav", ".spec.pt")
90
- if self.use_mel_spec_posterior:
91
- spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
92
- try:
93
- spec = torch.load(spec_filename)
94
- except:
95
- if self.use_mel_spec_posterior:
96
- # if os.path.exists(filename.replace(".wav", ".spec.pt")):
97
- # # spec, n_fft, num_mels, sampling_rate, fmin, fmax
98
- # spec = spec_to_mel_torch(
99
- # torch.load(filename.replace(".wav", ".spec.pt")),
100
- # self.filter_length, self.n_mel_channels, self.sampling_rate,
101
- # self.hparams.mel_fmin, self.hparams.mel_fmax)
102
- spec = mel_spectrogram_torch(audio_norm, self.filter_length,
103
- self.n_mel_channels, self.sampling_rate, self.hop_length,
104
- self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)
105
- else:
106
- spec = spectrogram_torch(audio_norm, self.filter_length,
107
- self.sampling_rate, self.hop_length, self.win_length,
108
- center=False)
109
- spec = torch.squeeze(spec, 0)
110
- torch.save(spec, spec_filename)
111
- return spec, audio_norm
112
-
113
- def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
114
- # print(text, word2ph,phone, tone, language_str)
115
- pold = phone
116
- w2pho = [i for i in word2ph]
117
- word2ph = [i for i in word2ph]
118
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
119
- pold2 = phone
120
-
121
- if self.add_blank:
122
- p1 = len(phone)
123
- phone = commons.intersperse(phone, 0)
124
- p2 = len(phone)
125
- t1 = len(tone)
126
- tone = commons.intersperse(tone, 0)
127
- t2 = len(tone)
128
- language = commons.intersperse(language, 0)
129
- for i in range(len(word2ph)):
130
- word2ph[i] = word2ph[i] * 2
131
- word2ph[0] += 1
132
- bert_path = wav_path.replace(".wav", ".bert.pt")
133
- try:
134
- bert = torch.load(bert_path)
135
- assert bert.shape[-1] == len(phone)
136
- except:
137
- bert = get_bert(text, word2ph, language_str)
138
- torch.save(bert, bert_path)
139
- #print(bert.shape[-1], bert_path, text, pold)
140
- assert bert.shape[-1] == len(phone)
141
-
142
- assert bert.shape[-1] == len(phone), (
143
- bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)
144
- phone = torch.LongTensor(phone)
145
- tone = torch.LongTensor(tone)
146
- language = torch.LongTensor(language)
147
- return bert, phone, tone, language
148
-
149
- def get_sid(self, sid):
150
- sid = torch.LongTensor([int(sid)])
151
- return sid
152
-
153
- def __getitem__(self, index):
154
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
155
-
156
- def __len__(self):
157
- return len(self.audiopaths_sid_text)
158
-
159
-
160
- class TextAudioSpeakerCollate():
161
- """ Zero-pads model inputs and targets
162
- """
163
-
164
- def __init__(self, return_ids=False):
165
- self.return_ids = return_ids
166
-
167
- def __call__(self, batch):
168
- """Collate's training batch from normalized text, audio and speaker identities
169
- PARAMS
170
- ------
171
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
172
- """
173
- # Right zero-pad all one-hot text sequences to max input length
174
- _, ids_sorted_decreasing = torch.sort(
175
- torch.LongTensor([x[1].size(1) for x in batch]),
176
- dim=0, descending=True)
177
-
178
- max_text_len = max([len(x[0]) for x in batch])
179
- max_spec_len = max([x[1].size(1) for x in batch])
180
- max_wav_len = max([x[2].size(1) for x in batch])
181
-
182
- text_lengths = torch.LongTensor(len(batch))
183
- spec_lengths = torch.LongTensor(len(batch))
184
- wav_lengths = torch.LongTensor(len(batch))
185
- sid = torch.LongTensor(len(batch))
186
-
187
- text_padded = torch.LongTensor(len(batch), max_text_len)
188
- tone_padded = torch.LongTensor(len(batch), max_text_len)
189
- language_padded = torch.LongTensor(len(batch), max_text_len)
190
- bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
191
-
192
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
193
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
194
- text_padded.zero_()
195
- tone_padded.zero_()
196
- language_padded.zero_()
197
- spec_padded.zero_()
198
- wav_padded.zero_()
199
- bert_padded.zero_()
200
- for i in range(len(ids_sorted_decreasing)):
201
- row = batch[ids_sorted_decreasing[i]]
202
-
203
- text = row[0]
204
- text_padded[i, :text.size(0)] = text
205
- text_lengths[i] = text.size(0)
206
-
207
- spec = row[1]
208
- spec_padded[i, :, :spec.size(1)] = spec
209
- spec_lengths[i] = spec.size(1)
210
-
211
- wav = row[2]
212
- wav_padded[i, :, :wav.size(1)] = wav
213
- wav_lengths[i] = wav.size(1)
214
-
215
- sid[i] = row[3]
216
-
217
- tone = row[4]
218
- tone_padded[i, :tone.size(0)] = tone
219
-
220
- language = row[5]
221
- language_padded[i, :language.size(0)] = language
222
-
223
- bert = row[6]
224
- bert_padded[i, :, :bert.size(1)] = bert
225
-
226
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded
227
-
228
-
229
- class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
230
- """
231
- Maintain similar input lengths in a batch.
232
- Length groups are specified by boundaries.
233
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
234
-
235
- It removes samples which are not included in the boundaries.
236
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
237
- """
238
-
239
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
240
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
241
- self.lengths = dataset.lengths
242
- self.batch_size = batch_size
243
- self.boundaries = boundaries
244
-
245
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
246
- self.total_size = sum(self.num_samples_per_bucket)
247
- self.num_samples = self.total_size // self.num_replicas
248
-
249
- def _create_buckets(self):
250
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
251
- for i in range(len(self.lengths)):
252
- length = self.lengths[i]
253
- idx_bucket = self._bisect(length)
254
- if idx_bucket != -1:
255
- buckets[idx_bucket].append(i)
256
-
257
- for i in range(len(buckets) - 1, 0, -1):
258
- if len(buckets[i]) == 0:
259
- buckets.pop(i)
260
- self.boundaries.pop(i + 1)
261
-
262
- num_samples_per_bucket = []
263
- for i in range(len(buckets)):
264
- len_bucket = len(buckets[i])
265
- total_batch_size = self.num_replicas * self.batch_size
266
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
267
- num_samples_per_bucket.append(len_bucket + rem)
268
- return buckets, num_samples_per_bucket
269
-
270
- def __iter__(self):
271
- # deterministically shuffle based on epoch
272
- g = torch.Generator()
273
- g.manual_seed(self.epoch)
274
-
275
- indices = []
276
- if self.shuffle:
277
- for bucket in self.buckets:
278
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
279
- else:
280
- for bucket in self.buckets:
281
- indices.append(list(range(len(bucket))))
282
-
283
- batches = []
284
- for i in range(len(self.buckets)):
285
- bucket = self.buckets[i]
286
- len_bucket = len(bucket)
287
- if (len_bucket == 0):
288
- continue
289
- ids_bucket = indices[i]
290
- num_samples_bucket = self.num_samples_per_bucket[i]
291
-
292
- # add extra samples to make it evenly divisible
293
- rem = num_samples_bucket - len_bucket
294
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
295
-
296
- # subsample
297
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
298
-
299
- # batching
300
- for j in range(len(ids_bucket) // self.batch_size):
301
- batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
302
- batches.append(batch)
303
-
304
- if self.shuffle:
305
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
306
- batches = [batches[i] for i in batch_ids]
307
- self.batches = batches
308
-
309
- assert len(self.batches) * self.batch_size == self.num_samples
310
- return iter(self.batches)
311
-
312
- def _bisect(self, x, lo=0, hi=None):
313
- if hi is None:
314
- hi = len(self.boundaries) - 1
315
-
316
- if hi > lo:
317
- mid = (hi + lo) // 2
318
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
319
- return mid
320
- elif x <= self.boundaries[mid]:
321
- return self._bisect(x, lo, mid)
322
- else:
323
- return self._bisect(x, mid + 1, hi)
324
- else:
325
- return -1
326
-
327
- def __len__(self):
328
- return self.num_samples // self.batch_size
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Cuentos De Espacio Mutante Blobs Ataque Ps Vita.md DELETED
@@ -1,127 +0,0 @@
1
-
2
- <h1>Cuentos desde el espacio: Mutant Blobs Attack - Un divertido juego de plataformas para PS Vita</h1>
3
- <h2>Introducción</h2>
4
- <p>¿Te gustan los juegos de plataformas? ¿Tienes un PS Vita o estás pensando en comprar uno? Si respondiste sí a ambas preguntas, entonces definitivamente deberías revisar Tales from Space: Mutant Blobs Attack. Es uno de los mejores juegos para PS Vita que puedes descargar desde PlayStation Store.</p>
5
- <h3>¿Qué es Cuentos del Espacio: Ataque de Manchas Mutantes? </h3>
6
- <p>Tales from Space: Mutant Blobs Attack es un juego de plataformas de desplazamiento lateral desarrollado y publicado por DrinkBox Studios en 2012. Es la secuela de Tales from Space: About a Blob, lanzado en 2011. </p>
7
- <h2>cuentos de espacio mutante blobs ataque ps vita</h2><br /><p><b><b>Download Zip</b> &#127379; <a href="https://bltlly.com/2v6KVu">https://bltlly.com/2v6KVu</a></b></p><br /><br />
8
- <p>El juego se trata de una mancha mutante gruñón que escapa de un laboratorio y comienza a comer todo a su paso. La mancha puede crecer absorbiendo objetos sueltos en el entorno, como monedas, coches, vacas e incluso planetas. El blob también puede usar varios poderes y habilidades para superar obstáculos y enemigos. </p>
9
- <p>El juego cuenta con 24 niveles establecidos en diferentes lugares, como un campus universitario, una ciudad, una base militar y el espacio exterior. El juego también tiene niveles de bonificación que ponen a prueba tus habilidades y reflejos. </p>
10
- <h3>¿Por qué debería jugarlo? </h3>
11
- <p>Tales from Space: Mutant Blobs Attack es un juego divertido y adictivo que te mantendrá entretenido durante horas. El juego tiene un estilo de dibujos animados encantador que es colorido y humorístico. El juego también tiene una banda sonora pegadiza que coincide con el estado de ánimo de cada nivel. </p>
12
- <p>El juego es fácil de jugar pero difícil de dominar. El juego tiene controles simples que utilizan tanto los palillos como la pantalla táctil de PS Vita. El juego también tiene rompecabezas inteligentes que requieren que uses tus poderes sabiamente. </p>
13
- <p>El juego es <h2>Características</h2>
14
- <p>Tales from Space: Mutant Blobs Attack tiene muchas características que lo convierten en un juego de plataformas único y agradable. Estos son algunos de ellos:</p>
15
- <h3>Juego</h3>
16
-
17
- <p>El blob también tiene varios poderes y habilidades que puede usar para resolver puzzles y derrotar enemigos. Algunos de estos poderes son:</p>
18
- <p></p>
19
- <h4>Controles</h4>
20
- <ul>
21
- <li><b>Telekinesis:</b> La mancha puede usar su mente para mover ciertos objetos, como cajas de metal, imanes y cohetes. El blob también puede usar telekinesis para activar interruptores y palancas. </li>
22
- <li><b>Magnetismo:</b> La mancha puede atraer o repeler objetos metálicos, como monedas, tubos y tanques. La mancha también puede usar magnetismo para adherirse a superficies metálicas o volar a través de campos magnéticos. </li>
23
- <li><b>Rocket:</b> La mancha puede lanzarse como un cohete, usando la pantalla táctil para apuntar y el pulgar para controlar la velocidad y la dirección. La mancha puede utilizar el cohete para volar sobre los huecos, esquivar los obstáculos, y llegar a lugares altos. </li>
24
- </ul>
25
- <h4>Poderes</h4>
26
- <ul>
27
- <li><b>Salto de pared:</b> La mancha puede saltar de una pared a otra, usando el pulgar para cambiar de dirección. La mancha puede utilizar el salto de pared para subir superficies verticales o para cruzar pasajes estrechos. </li>
28
- <li><b>Rebote:</b> La mancha puede rebotar en ciertos objetos, como trampolines, globos y resortes. La mancha puede usar el rebote para saltar más alto o para alcanzar áreas ocultas. </li>
29
- <li><b>Goo:</b> El blob puede convertirse en goo, utilizando la pantalla táctil para difundir o retraer. La mancha puede utilizar la sustancia viscosa para exprimir a través de espacios reducidos o para cubrir grandes áreas. </li>
30
- </ul>
31
- <h4>Niveles</h4>
32
- <p>El juego tiene 24 niveles que se dividen en seis mundos, cada uno con un tema y configuración diferentes. Los mundos son:</p>
33
- <ul>
34
- <li><b>Campus universitario:</b> El primer mundo del juego, donde la mancha se escapa del laboratorio y comienza a comer todo lo que se ve. Los niveles incluyen aulas, dormitorios, cafeterías, bibliotecas y estadios. </li>
35
- <li><b>Centro de la ciudad:</b> El segundo mundo del juego, donde la mancha invade la ciudad y causa el caos. Los niveles incluyen calles, callejones, tejados, alcantarillas y subterráneos. </li>
36
-
37
- <li><b>Base Lunar:</b> El cuarto mundo del juego, donde la mancha viaja al espacio exterior y explora una estación lunar. Los niveles incluyen cráteres, módulos, cúpulas, satélites y cohetes. </li>
38
- <li><b>Sistema Solar:</b> El quinto mundo del juego, donde la mancha visita diferentes planetas y se los come. Los niveles incluyen Mercurio, Venus, la Tierra, Marte, Júpiter, Saturno, Urano, Neptuno y Plutón.</li>
39
- <li><b>Créditos:</b> El mundo final del juego, donde la mancha se come los créditos y los desarrolladores. Los niveles incluyen nombres, logotipos, imágenes y mensajes. </li>
40
- </ul>
41
- <h3>Gráficos</h3>
42
- <p>El juego tiene un estilo gráfico colorido y caricaturesco que se adapta al tono humorístico del juego. El juego utiliza gráficos 2D con efectos 3D, como sombras, iluminación y profundidad. El juego también tiene animaciones suaves y transiciones que hacen que la mancha se vea viva y expresiva. </p>
43
- <p>El juego tiene una variedad de ambientes que son detallados y diversos. El juego utiliza diferentes temas, colores y texturas para crear contraste y variedad. El juego también tiene elementos dinámicos, como objetos en movimiento, objetos destructibles y fondos interactivos. </p>
44
- <h3>Sonido</h3>
45
- <p>El juego tiene una banda sonora pegadiza y optimista que coincide con el estado de ánimo de cada nivel. El juego utiliza diferentes géneros, como rock, jazz, techno y orquestal, para crear diversidad y atmósfera. El juego también tiene efectos de sonido que mejoran la jugabilidad, como ruidos de comer, explosiones y clips de voz. </p>
46
- <h3>Comparación con otros juegos</h3>
47
- <p>Tales from Space: Mutant Blobs Attack es un juego de plataformas único que se destaca de otros juegos del género. Aquí hay una tabla que compara las características del juego con otros juegos similares:</p>
48
- <tabla>
49
- <tr>
50
- <th>Juego</th>
51
- <th>Características</th>
52
- </tr>
53
- <tr>
54
- <td>Cuentos desde el Espacio: Ataque de Blobs Mutantes</td>
55
- <td>- Comer y crecer mecánico<br>- Varios poderes y habilidades<br>- 24 niveles en seis mundos<br>- Gráficos de estilo de dibujos animados<br>- Banda sonora pegadiza</td>
56
- </tr>
57
- <tr>
58
-
59
- <td>- Personajes y niveles personalizables<br>- Pantalla táctil y controles de inclinación<br>- Multijugador y comunidad en línea<br>- Gráficos de estilo artesanal<br>- Banda sonora original</td>
60
- </tr>
61
- <tr>
62
- <td>Orígenes de Rayman</td>
63
- <td>- Juego rápido y fluido<br>- Modo cooperativo para cuatro jugadores<br>- 60 niveles en 12 mundos<br>- Gráficos dibujados a mano<br>- Banda sonora musical</td>
64
- </tr>
65
- <tr>
66
- <td>Super Meat Boy</td>
67
- <td>- Juego desafiante y preciso<br>- Gráficos estilo retro<br>- 300 niveles en siete capítulos<br>- Humor oscuro<br>- Banda sonora indie</td>
68
- </tr>
69
- </tabla>
70
- <h2>Recepción</h2>
71
- <p>Tales from Space: Mutant Blobs Attack recibió críticas positivas de críticos y jugadores por igual. El juego fue elogiado por su jugabilidad, gráficos, sonido y humor. El juego también fue reconocido como uno de los mejores juegos para PS Vita. Estos son algunos de los puntos destacados de la recepción:</p>
72
- <h3>Calificaciones</h3>
73
- <p>El juego recibió altas calificaciones de varias fuentes, como:</p>
74
- <ul>
75
- <li><b>Metacritic:</b> 87/100 basado en 35 comentarios</li>
76
- <li><b>GameRankings:</b> 86.67% basado en 18 comentarios</li>
77
- <li><b>IGN:</b> 9/10 basado en una revisión</li>
78
- <li><b>GameSpot:</b> 8/10 basado en una revisión</li>
79
- <li><b>Puntuación del usuario:</b> 8.4/10 basado en 76 valoraciones</li>
80
- </ul>
81
- <h3>Comentarios</h3>
82
- <p>El juego recibió comentarios positivos de varios revisores, como:</p>
83
- <ul>
84
- <li><b>Destructoid:</b> "Tales from Space: Mutant Blobs Attack es uno de los mejores juegos de PlayStation Vita, sin excepción. Es encantador, inteligente, lleno de contenido, y tiene un precio para complacer." </li>
85
- <li><b>Polygon:</b> "Mutant Blobs Attack es una alegría para jugar - un concepto simple ejecutado bien con suficiente variedad para mantener las cosas interesantes en todo." </li>
86
- <li><b>Giant Bomb:</b> "Mutant Blobs Attack es un gran ejemplo de cómo hacer un juego de plataformas divertido que no se basa en la nostalgia o trucos."</li>
87
- <li><b>Eurogamer:</b> "Mutant Blobs Attack es un juego de plataformas inteligentemente diseñado con bolsas de carácter y un genuino sentido de la diversión."</li>
88
-
89
- </ul>
90
- <h3>Premios</h3>
91
- <p>El juego ganó varios premios de varias fuentes, como:</p>
92
- <ul>
93
- <li><b>D.I.C.E. Premios:</b> Nominado para el juego portátil del año en 2013</li>
94
- <li><b>Premios GDC:</b> Nominado a mejor juego portátil/móvil en 2013</li>
95
- <li><b>Canadian Videogame Awards:</b> Ganó el mejor juego descargable y el mejor juego sobre la marcha en 2012</li>
96
- <li><b>Lo mejor de IGN de 2012:</b> Ganó el mejor juego de plataformas de PS Vita y el mejor sonido de PS Vita</li>
97
- <li><b>Lo mejor de Game Informer de 2012:</b> Ganó el mejor exclusivo de mano</li>
98
- <li><b>GameSpot’s Best of 2012:</b> Nominado a Mejor Juego de Plataformas y Mejor Juego de PS Vita</li>
99
- </ul>
100
- <h2>Descargar</h2>
101
- <p>Si estás interesado en jugar a Tales from Space: Mutant Blobs Attack, puedes descargarlo desde PlayStation Store. Estos son los pasos que debes seguir:</p>
102
- <h3>Cómo descargar Tales from Space: Mutant Blobs Attack on PS Vita</h3>
103
- <ol>
104
- <li>Enciende tu PS Vita y conéctate a internet. </li>
105
- <li>Ir a la aplicación PlayStation Store en la pantalla de inicio. </li>
106
- <li>Buscar cuentos desde el espacio: Mutant Blobs Ataque en la barra de búsqueda o navegar por las categorías. </li>
107
- <li>Selecciona el juego y toca el botón de descarga. </li>
108
- <li>Espera a que el juego se descargue e instale en tu PS Vita.</li>
109
- <li>¡Disfruta jugando! </li>
110
- </ol>
111
- <h3>¿Cuánto cuesta? </h3>
112
- <p>Tales from Space: Mutant Blobs Attack es un juego muy asequible que cuesta solo $7.99. También puedes obtener un descuento si eres miembro de PlayStation Plus. El juego bien vale el precio, considerando la cantidad de contenido y diversión que ofrece. </p>
113
- <h2>Conclusión</h2>
114
-
115
- <p>El juego recibió críticas positivas de críticos y jugadores por igual, que elogiaron su jugabilidad, gráficos, sonido y humor. El juego también ganó varios premios de diversas fuentes, que lo reconocieron como uno de los mejores juegos para PS Vita.</p>
116
- <p>Si estás buscando un juego de plataformas divertido y divertido que te mantenga entretenido durante horas, entonces definitivamente deberías descargar Tales from Space: Mutant Blobs Attack de PlayStation Store. ¡No te arrepentirás! </p>
117
- <p>¿Qué estás esperando? ¡Coge tu PS Vita y empieza a comer todo lo que tengas a la vista! </p>
118
- <h2>Preguntas frecuentes</h2>
119
- <ul>
120
- <li><b>Q: ¿Cuánto tiempo se tarda en completar el juego? </b><br>A: Depende de tu nivel de habilidad y de cuánto explores cada nivel, pero en promedio, se tarda de 4 a 5 horas en completar la historia principal. También puedes reproducir los niveles para encontrar objetos ocultos y mejorar tu puntuación. </li>
121
- <li><b>Q: ¿Hay un modo multijugador? </b><br>A: No, no hay modo multijugador en Tales from Space: Mutant Blobs Attack. Sin embargo, puedes comparar tus puntajes y logros con otros jugadores en línea a través de tablas de clasificación y trofeos. </li>
122
- <li><b>Q: ¿Hay una secuela o un DLC? </b><br>A: No, no hay secuela o un DLC para Tales from Space: Mutant Blobs Attack. Sin embargo, puedes jugar al juego anterior de la serie, Tales from Space: About a Blob, que también está disponible en PlayStation Store.</li>
123
- <li><b>Q: ¿Cuáles son los requisitos del sistema para el juego? </b><br>A: Necesitas un PS Vita con al menos 300 MB de espacio libre para descargar y jugar Tales from Space: Mutant Blobs Attack. También necesitas una conexión a Internet para acceder a PlayStation Store y a las funciones online. </li>
124
-
125
- </ul></p> 64aa2da5cf<br />
126
- <br />
127
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Buatong/Computing/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Computing
3
- emoji: 🌖
4
- colorFrom: pink
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.14.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/_static/mathjax_mathml.user.js DELETED
@@ -1,18 +0,0 @@
1
- // ==UserScript==
2
- // @name MathJax MathML
3
- // @namespace http://www.mathjax.org/
4
- // @description Insert MathJax into pages containing MathML
5
- // @include *
6
- // ==/UserScript==
7
-
8
- if ((window.unsafeWindow == null ? window : unsafeWindow).MathJax == null) {
9
- if ((document.getElementsByTagName("math").length > 0) ||
10
- (document.getElementsByTagNameNS == null ? false :
11
- (document.getElementsByTagNameNS("http://www.w3.org/1998/Math/MathML","math").length > 0))) {
12
- var script = document.createElement("script");
13
- script.type = "text/javascript";
14
- script.src = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_CHTML-full";
15
- var config = 'MathJax.Hub.Startup.onload()';
16
- document.getElementsByTagName("head")[0].appendChild(script);
17
- }
18
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Text2Human/Text2Human/data/__init__.py DELETED
File without changes
spaces/CVPR/WALT/mmdet/models/dense_heads/reppoints_head.py DELETED
@@ -1,763 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
5
- from mmcv.ops import DeformConv2d
6
-
7
- from mmdet.core import (PointGenerator, build_assigner, build_sampler,
8
- images_to_levels, multi_apply, multiclass_nms, unmap)
9
- from ..builder import HEADS, build_loss
10
- from .anchor_free_head import AnchorFreeHead
11
-
12
-
13
- @HEADS.register_module()
14
- class RepPointsHead(AnchorFreeHead):
15
- """RepPoint head.
16
-
17
- Args:
18
- point_feat_channels (int): Number of channels of points features.
19
- gradient_mul (float): The multiplier to gradients from
20
- points refinement and recognition.
21
- point_strides (Iterable): points strides.
22
- point_base_scale (int): bbox scale for assigning labels.
23
- loss_cls (dict): Config of classification loss.
24
- loss_bbox_init (dict): Config of initial points loss.
25
- loss_bbox_refine (dict): Config of points loss in refinement.
26
- use_grid_points (bool): If we use bounding box representation, the
27
- reppoints is represented as grid points on the bounding box.
28
- center_init (bool): Whether to use center point assignment.
29
- transform_method (str): The methods to transform RepPoints to bbox.
30
- """ # noqa: W605
31
-
32
- def __init__(self,
33
- num_classes,
34
- in_channels,
35
- point_feat_channels=256,
36
- num_points=9,
37
- gradient_mul=0.1,
38
- point_strides=[8, 16, 32, 64, 128],
39
- point_base_scale=4,
40
- loss_cls=dict(
41
- type='FocalLoss',
42
- use_sigmoid=True,
43
- gamma=2.0,
44
- alpha=0.25,
45
- loss_weight=1.0),
46
- loss_bbox_init=dict(
47
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
48
- loss_bbox_refine=dict(
49
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
50
- use_grid_points=False,
51
- center_init=True,
52
- transform_method='moment',
53
- moment_mul=0.01,
54
- **kwargs):
55
- self.num_points = num_points
56
- self.point_feat_channels = point_feat_channels
57
- self.use_grid_points = use_grid_points
58
- self.center_init = center_init
59
-
60
- # we use deform conv to extract points features
61
- self.dcn_kernel = int(np.sqrt(num_points))
62
- self.dcn_pad = int((self.dcn_kernel - 1) / 2)
63
- assert self.dcn_kernel * self.dcn_kernel == num_points, \
64
- 'The points number should be a square number.'
65
- assert self.dcn_kernel % 2 == 1, \
66
- 'The points number should be an odd square number.'
67
- dcn_base = np.arange(-self.dcn_pad,
68
- self.dcn_pad + 1).astype(np.float64)
69
- dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
70
- dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
71
- dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
72
- (-1))
73
- self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
74
-
75
- super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
76
-
77
- self.gradient_mul = gradient_mul
78
- self.point_base_scale = point_base_scale
79
- self.point_strides = point_strides
80
- self.point_generators = [PointGenerator() for _ in self.point_strides]
81
-
82
- self.sampling = loss_cls['type'] not in ['FocalLoss']
83
- if self.train_cfg:
84
- self.init_assigner = build_assigner(self.train_cfg.init.assigner)
85
- self.refine_assigner = build_assigner(
86
- self.train_cfg.refine.assigner)
87
- # use PseudoSampler when sampling is False
88
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
89
- sampler_cfg = self.train_cfg.sampler
90
- else:
91
- sampler_cfg = dict(type='PseudoSampler')
92
- self.sampler = build_sampler(sampler_cfg, context=self)
93
- self.transform_method = transform_method
94
- if self.transform_method == 'moment':
95
- self.moment_transfer = nn.Parameter(
96
- data=torch.zeros(2), requires_grad=True)
97
- self.moment_mul = moment_mul
98
-
99
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
100
- if self.use_sigmoid_cls:
101
- self.cls_out_channels = self.num_classes
102
- else:
103
- self.cls_out_channels = self.num_classes + 1
104
- self.loss_bbox_init = build_loss(loss_bbox_init)
105
- self.loss_bbox_refine = build_loss(loss_bbox_refine)
106
-
107
- def _init_layers(self):
108
- """Initialize layers of the head."""
109
- self.relu = nn.ReLU(inplace=True)
110
- self.cls_convs = nn.ModuleList()
111
- self.reg_convs = nn.ModuleList()
112
- for i in range(self.stacked_convs):
113
- chn = self.in_channels if i == 0 else self.feat_channels
114
- self.cls_convs.append(
115
- ConvModule(
116
- chn,
117
- self.feat_channels,
118
- 3,
119
- stride=1,
120
- padding=1,
121
- conv_cfg=self.conv_cfg,
122
- norm_cfg=self.norm_cfg))
123
- self.reg_convs.append(
124
- ConvModule(
125
- chn,
126
- self.feat_channels,
127
- 3,
128
- stride=1,
129
- padding=1,
130
- conv_cfg=self.conv_cfg,
131
- norm_cfg=self.norm_cfg))
132
- pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
133
- self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
134
- self.point_feat_channels,
135
- self.dcn_kernel, 1,
136
- self.dcn_pad)
137
- self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
138
- self.cls_out_channels, 1, 1, 0)
139
- self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
140
- self.point_feat_channels, 3,
141
- 1, 1)
142
- self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
143
- pts_out_dim, 1, 1, 0)
144
- self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
145
- self.point_feat_channels,
146
- self.dcn_kernel, 1,
147
- self.dcn_pad)
148
- self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
149
- pts_out_dim, 1, 1, 0)
150
-
151
- def init_weights(self):
152
- """Initialize weights of the head."""
153
- for m in self.cls_convs:
154
- normal_init(m.conv, std=0.01)
155
- for m in self.reg_convs:
156
- normal_init(m.conv, std=0.01)
157
- bias_cls = bias_init_with_prob(0.01)
158
- normal_init(self.reppoints_cls_conv, std=0.01)
159
- normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
160
- normal_init(self.reppoints_pts_init_conv, std=0.01)
161
- normal_init(self.reppoints_pts_init_out, std=0.01)
162
- normal_init(self.reppoints_pts_refine_conv, std=0.01)
163
- normal_init(self.reppoints_pts_refine_out, std=0.01)
164
-
165
- def points2bbox(self, pts, y_first=True):
166
- """Converting the points set into bounding box.
167
-
168
- :param pts: the input points sets (fields), each points
169
- set (fields) is represented as 2n scalar.
170
- :param y_first: if y_first=True, the point set is represented as
171
- [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
172
- represented as [x1, y1, x2, y2 ... xn, yn].
173
- :return: each points set is converting to a bbox [x1, y1, x2, y2].
174
- """
175
- pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
176
- pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
177
- ...]
178
- pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
179
- ...]
180
- if self.transform_method == 'minmax':
181
- bbox_left = pts_x.min(dim=1, keepdim=True)[0]
182
- bbox_right = pts_x.max(dim=1, keepdim=True)[0]
183
- bbox_up = pts_y.min(dim=1, keepdim=True)[0]
184
- bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
185
- bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
186
- dim=1)
187
- elif self.transform_method == 'partial_minmax':
188
- pts_y = pts_y[:, :4, ...]
189
- pts_x = pts_x[:, :4, ...]
190
- bbox_left = pts_x.min(dim=1, keepdim=True)[0]
191
- bbox_right = pts_x.max(dim=1, keepdim=True)[0]
192
- bbox_up = pts_y.min(dim=1, keepdim=True)[0]
193
- bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
194
- bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
195
- dim=1)
196
- elif self.transform_method == 'moment':
197
- pts_y_mean = pts_y.mean(dim=1, keepdim=True)
198
- pts_x_mean = pts_x.mean(dim=1, keepdim=True)
199
- pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
200
- pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
201
- moment_transfer = (self.moment_transfer * self.moment_mul) + (
202
- self.moment_transfer.detach() * (1 - self.moment_mul))
203
- moment_width_transfer = moment_transfer[0]
204
- moment_height_transfer = moment_transfer[1]
205
- half_width = pts_x_std * torch.exp(moment_width_transfer)
206
- half_height = pts_y_std * torch.exp(moment_height_transfer)
207
- bbox = torch.cat([
208
- pts_x_mean - half_width, pts_y_mean - half_height,
209
- pts_x_mean + half_width, pts_y_mean + half_height
210
- ],
211
- dim=1)
212
- else:
213
- raise NotImplementedError
214
- return bbox
215
-
216
- def gen_grid_from_reg(self, reg, previous_boxes):
217
- """Base on the previous bboxes and regression values, we compute the
218
- regressed bboxes and generate the grids on the bboxes.
219
-
220
- :param reg: the regression value to previous bboxes.
221
- :param previous_boxes: previous bboxes.
222
- :return: generate grids on the regressed bboxes.
223
- """
224
- b, _, h, w = reg.shape
225
- bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
226
- bwh = (previous_boxes[:, 2:, ...] -
227
- previous_boxes[:, :2, ...]).clamp(min=1e-6)
228
- grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
229
- reg[:, 2:, ...])
230
- grid_wh = bwh * torch.exp(reg[:, 2:, ...])
231
- grid_left = grid_topleft[:, [0], ...]
232
- grid_top = grid_topleft[:, [1], ...]
233
- grid_width = grid_wh[:, [0], ...]
234
- grid_height = grid_wh[:, [1], ...]
235
- intervel = torch.linspace(0., 1., self.dcn_kernel).view(
236
- 1, self.dcn_kernel, 1, 1).type_as(reg)
237
- grid_x = grid_left + grid_width * intervel
238
- grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
239
- grid_x = grid_x.view(b, -1, h, w)
240
- grid_y = grid_top + grid_height * intervel
241
- grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
242
- grid_y = grid_y.view(b, -1, h, w)
243
- grid_yx = torch.stack([grid_y, grid_x], dim=2)
244
- grid_yx = grid_yx.view(b, -1, h, w)
245
- regressed_bbox = torch.cat([
246
- grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
247
- ], 1)
248
- return grid_yx, regressed_bbox
249
-
250
- def forward(self, feats):
251
- return multi_apply(self.forward_single, feats)
252
-
253
- def forward_single(self, x):
254
- """Forward feature map of a single FPN level."""
255
- dcn_base_offset = self.dcn_base_offset.type_as(x)
256
- # If we use center_init, the initial reppoints is from center points.
257
- # If we use bounding bbox representation, the initial reppoints is
258
- # from regular grid placed on a pre-defined bbox.
259
- if self.use_grid_points or not self.center_init:
260
- scale = self.point_base_scale / 2
261
- points_init = dcn_base_offset / dcn_base_offset.max() * scale
262
- bbox_init = x.new_tensor([-scale, -scale, scale,
263
- scale]).view(1, 4, 1, 1)
264
- else:
265
- points_init = 0
266
- cls_feat = x
267
- pts_feat = x
268
- for cls_conv in self.cls_convs:
269
- cls_feat = cls_conv(cls_feat)
270
- for reg_conv in self.reg_convs:
271
- pts_feat = reg_conv(pts_feat)
272
- # initialize reppoints
273
- pts_out_init = self.reppoints_pts_init_out(
274
- self.relu(self.reppoints_pts_init_conv(pts_feat)))
275
- if self.use_grid_points:
276
- pts_out_init, bbox_out_init = self.gen_grid_from_reg(
277
- pts_out_init, bbox_init.detach())
278
- else:
279
- pts_out_init = pts_out_init + points_init
280
- # refine and classify reppoints
281
- pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
282
- ) + self.gradient_mul * pts_out_init
283
- dcn_offset = pts_out_init_grad_mul - dcn_base_offset
284
- cls_out = self.reppoints_cls_out(
285
- self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
286
- pts_out_refine = self.reppoints_pts_refine_out(
287
- self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
288
- if self.use_grid_points:
289
- pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
290
- pts_out_refine, bbox_out_init.detach())
291
- else:
292
- pts_out_refine = pts_out_refine + pts_out_init.detach()
293
- return cls_out, pts_out_init, pts_out_refine
294
-
295
- def get_points(self, featmap_sizes, img_metas, device):
296
- """Get points according to feature map sizes.
297
-
298
- Args:
299
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
300
- img_metas (list[dict]): Image meta info.
301
-
302
- Returns:
303
- tuple: points of each image, valid flags of each image
304
- """
305
- num_imgs = len(img_metas)
306
- num_levels = len(featmap_sizes)
307
-
308
- # since feature map sizes of all images are the same, we only compute
309
- # points center for one time
310
- multi_level_points = []
311
- for i in range(num_levels):
312
- points = self.point_generators[i].grid_points(
313
- featmap_sizes[i], self.point_strides[i], device)
314
- multi_level_points.append(points)
315
- points_list = [[point.clone() for point in multi_level_points]
316
- for _ in range(num_imgs)]
317
-
318
- # for each image, we compute valid flags of multi level grids
319
- valid_flag_list = []
320
- for img_id, img_meta in enumerate(img_metas):
321
- multi_level_flags = []
322
- for i in range(num_levels):
323
- point_stride = self.point_strides[i]
324
- feat_h, feat_w = featmap_sizes[i]
325
- h, w = img_meta['pad_shape'][:2]
326
- valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
327
- valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
328
- flags = self.point_generators[i].valid_flags(
329
- (feat_h, feat_w), (valid_feat_h, valid_feat_w), device)
330
- multi_level_flags.append(flags)
331
- valid_flag_list.append(multi_level_flags)
332
-
333
- return points_list, valid_flag_list
334
-
335
- def centers_to_bboxes(self, point_list):
336
- """Get bboxes according to center points.
337
-
338
- Only used in :class:`MaxIoUAssigner`.
339
- """
340
- bbox_list = []
341
- for i_img, point in enumerate(point_list):
342
- bbox = []
343
- for i_lvl in range(len(self.point_strides)):
344
- scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
345
- bbox_shift = torch.Tensor([-scale, -scale, scale,
346
- scale]).view(1, 4).type_as(point[0])
347
- bbox_center = torch.cat(
348
- [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
349
- bbox.append(bbox_center + bbox_shift)
350
- bbox_list.append(bbox)
351
- return bbox_list
352
-
353
- def offset_to_pts(self, center_list, pred_list):
354
- """Change from point offset to point coordinate."""
355
- pts_list = []
356
- for i_lvl in range(len(self.point_strides)):
357
- pts_lvl = []
358
- for i_img in range(len(center_list)):
359
- pts_center = center_list[i_img][i_lvl][:, :2].repeat(
360
- 1, self.num_points)
361
- pts_shift = pred_list[i_lvl][i_img]
362
- yx_pts_shift = pts_shift.permute(1, 2, 0).view(
363
- -1, 2 * self.num_points)
364
- y_pts_shift = yx_pts_shift[..., 0::2]
365
- x_pts_shift = yx_pts_shift[..., 1::2]
366
- xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
367
- xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
368
- pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
369
- pts_lvl.append(pts)
370
- pts_lvl = torch.stack(pts_lvl, 0)
371
- pts_list.append(pts_lvl)
372
- return pts_list
373
-
374
- def _point_target_single(self,
375
- flat_proposals,
376
- valid_flags,
377
- gt_bboxes,
378
- gt_bboxes_ignore,
379
- gt_labels,
380
- label_channels=1,
381
- stage='init',
382
- unmap_outputs=True):
383
- inside_flags = valid_flags
384
- if not inside_flags.any():
385
- return (None, ) * 7
386
- # assign gt and sample proposals
387
- proposals = flat_proposals[inside_flags, :]
388
-
389
- if stage == 'init':
390
- assigner = self.init_assigner
391
- pos_weight = self.train_cfg.init.pos_weight
392
- else:
393
- assigner = self.refine_assigner
394
- pos_weight = self.train_cfg.refine.pos_weight
395
- assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
396
- None if self.sampling else gt_labels)
397
- sampling_result = self.sampler.sample(assign_result, proposals,
398
- gt_bboxes)
399
-
400
- num_valid_proposals = proposals.shape[0]
401
- bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
402
- pos_proposals = torch.zeros_like(proposals)
403
- proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
404
- labels = proposals.new_full((num_valid_proposals, ),
405
- self.num_classes,
406
- dtype=torch.long)
407
- label_weights = proposals.new_zeros(
408
- num_valid_proposals, dtype=torch.float)
409
-
410
- pos_inds = sampling_result.pos_inds
411
- neg_inds = sampling_result.neg_inds
412
- if len(pos_inds) > 0:
413
- pos_gt_bboxes = sampling_result.pos_gt_bboxes
414
- bbox_gt[pos_inds, :] = pos_gt_bboxes
415
- pos_proposals[pos_inds, :] = proposals[pos_inds, :]
416
- proposals_weights[pos_inds, :] = 1.0
417
- if gt_labels is None:
418
- # Only rpn gives gt_labels as None
419
- # Foreground is the first class
420
- labels[pos_inds] = 0
421
- else:
422
- labels[pos_inds] = gt_labels[
423
- sampling_result.pos_assigned_gt_inds]
424
- if pos_weight <= 0:
425
- label_weights[pos_inds] = 1.0
426
- else:
427
- label_weights[pos_inds] = pos_weight
428
- if len(neg_inds) > 0:
429
- label_weights[neg_inds] = 1.0
430
-
431
- # map up to original set of proposals
432
- if unmap_outputs:
433
- num_total_proposals = flat_proposals.size(0)
434
- labels = unmap(labels, num_total_proposals, inside_flags)
435
- label_weights = unmap(label_weights, num_total_proposals,
436
- inside_flags)
437
- bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
438
- pos_proposals = unmap(pos_proposals, num_total_proposals,
439
- inside_flags)
440
- proposals_weights = unmap(proposals_weights, num_total_proposals,
441
- inside_flags)
442
-
443
- return (labels, label_weights, bbox_gt, pos_proposals,
444
- proposals_weights, pos_inds, neg_inds)
445
-
446
- def get_targets(self,
447
- proposals_list,
448
- valid_flag_list,
449
- gt_bboxes_list,
450
- img_metas,
451
- gt_bboxes_ignore_list=None,
452
- gt_labels_list=None,
453
- stage='init',
454
- label_channels=1,
455
- unmap_outputs=True):
456
- """Compute corresponding GT box and classification targets for
457
- proposals.
458
-
459
- Args:
460
- proposals_list (list[list]): Multi level points/bboxes of each
461
- image.
462
- valid_flag_list (list[list]): Multi level valid flags of each
463
- image.
464
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
465
- img_metas (list[dict]): Meta info of each image.
466
- gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
467
- ignored.
468
- gt_bboxes_list (list[Tensor]): Ground truth labels of each box.
469
- stage (str): `init` or `refine`. Generate target for init stage or
470
- refine stage
471
- label_channels (int): Channel of label.
472
- unmap_outputs (bool): Whether to map outputs back to the original
473
- set of anchors.
474
-
475
- Returns:
476
- tuple:
477
- - labels_list (list[Tensor]): Labels of each level.
478
- - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
479
- - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
480
- - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
481
- - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
482
- - num_total_pos (int): Number of positive samples in all images. # noqa: E501
483
- - num_total_neg (int): Number of negative samples in all images. # noqa: E501
484
- """
485
- assert stage in ['init', 'refine']
486
- num_imgs = len(img_metas)
487
- assert len(proposals_list) == len(valid_flag_list) == num_imgs
488
-
489
- # points number of multi levels
490
- num_level_proposals = [points.size(0) for points in proposals_list[0]]
491
-
492
- # concat all level points and flags to a single tensor
493
- for i in range(num_imgs):
494
- assert len(proposals_list[i]) == len(valid_flag_list[i])
495
- proposals_list[i] = torch.cat(proposals_list[i])
496
- valid_flag_list[i] = torch.cat(valid_flag_list[i])
497
-
498
- # compute targets for each image
499
- if gt_bboxes_ignore_list is None:
500
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
501
- if gt_labels_list is None:
502
- gt_labels_list = [None for _ in range(num_imgs)]
503
- (all_labels, all_label_weights, all_bbox_gt, all_proposals,
504
- all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
505
- self._point_target_single,
506
- proposals_list,
507
- valid_flag_list,
508
- gt_bboxes_list,
509
- gt_bboxes_ignore_list,
510
- gt_labels_list,
511
- stage=stage,
512
- label_channels=label_channels,
513
- unmap_outputs=unmap_outputs)
514
- # no valid points
515
- if any([labels is None for labels in all_labels]):
516
- return None
517
- # sampled points of all images
518
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
519
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
520
- labels_list = images_to_levels(all_labels, num_level_proposals)
521
- label_weights_list = images_to_levels(all_label_weights,
522
- num_level_proposals)
523
- bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
524
- proposals_list = images_to_levels(all_proposals, num_level_proposals)
525
- proposal_weights_list = images_to_levels(all_proposal_weights,
526
- num_level_proposals)
527
- return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
528
- proposal_weights_list, num_total_pos, num_total_neg)
529
-
530
- def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
531
- label_weights, bbox_gt_init, bbox_weights_init,
532
- bbox_gt_refine, bbox_weights_refine, stride,
533
- num_total_samples_init, num_total_samples_refine):
534
- # classification loss
535
- labels = labels.reshape(-1)
536
- label_weights = label_weights.reshape(-1)
537
- cls_score = cls_score.permute(0, 2, 3,
538
- 1).reshape(-1, self.cls_out_channels)
539
- cls_score = cls_score.contiguous()
540
- loss_cls = self.loss_cls(
541
- cls_score,
542
- labels,
543
- label_weights,
544
- avg_factor=num_total_samples_refine)
545
-
546
- # points loss
547
- bbox_gt_init = bbox_gt_init.reshape(-1, 4)
548
- bbox_weights_init = bbox_weights_init.reshape(-1, 4)
549
- bbox_pred_init = self.points2bbox(
550
- pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
551
- bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
552
- bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
553
- bbox_pred_refine = self.points2bbox(
554
- pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
555
- normalize_term = self.point_base_scale * stride
556
- loss_pts_init = self.loss_bbox_init(
557
- bbox_pred_init / normalize_term,
558
- bbox_gt_init / normalize_term,
559
- bbox_weights_init,
560
- avg_factor=num_total_samples_init)
561
- loss_pts_refine = self.loss_bbox_refine(
562
- bbox_pred_refine / normalize_term,
563
- bbox_gt_refine / normalize_term,
564
- bbox_weights_refine,
565
- avg_factor=num_total_samples_refine)
566
- return loss_cls, loss_pts_init, loss_pts_refine
567
-
568
- def loss(self,
569
- cls_scores,
570
- pts_preds_init,
571
- pts_preds_refine,
572
- gt_bboxes,
573
- gt_labels,
574
- img_metas,
575
- gt_bboxes_ignore=None):
576
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
577
- assert len(featmap_sizes) == len(self.point_generators)
578
- device = cls_scores[0].device
579
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
580
-
581
- # target for initial stage
582
- center_list, valid_flag_list = self.get_points(featmap_sizes,
583
- img_metas, device)
584
- pts_coordinate_preds_init = self.offset_to_pts(center_list,
585
- pts_preds_init)
586
- if self.train_cfg.init.assigner['type'] == 'PointAssigner':
587
- # Assign target for center list
588
- candidate_list = center_list
589
- else:
590
- # transform center list to bbox list and
591
- # assign target for bbox list
592
- bbox_list = self.centers_to_bboxes(center_list)
593
- candidate_list = bbox_list
594
- cls_reg_targets_init = self.get_targets(
595
- candidate_list,
596
- valid_flag_list,
597
- gt_bboxes,
598
- img_metas,
599
- gt_bboxes_ignore_list=gt_bboxes_ignore,
600
- gt_labels_list=gt_labels,
601
- stage='init',
602
- label_channels=label_channels)
603
- (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
604
- num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
605
- num_total_samples_init = (
606
- num_total_pos_init +
607
- num_total_neg_init if self.sampling else num_total_pos_init)
608
-
609
- # target for refinement stage
610
- center_list, valid_flag_list = self.get_points(featmap_sizes,
611
- img_metas, device)
612
- pts_coordinate_preds_refine = self.offset_to_pts(
613
- center_list, pts_preds_refine)
614
- bbox_list = []
615
- for i_img, center in enumerate(center_list):
616
- bbox = []
617
- for i_lvl in range(len(pts_preds_refine)):
618
- bbox_preds_init = self.points2bbox(
619
- pts_preds_init[i_lvl].detach())
620
- bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
621
- bbox_center = torch.cat(
622
- [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
623
- bbox.append(bbox_center +
624
- bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
625
- bbox_list.append(bbox)
626
- cls_reg_targets_refine = self.get_targets(
627
- bbox_list,
628
- valid_flag_list,
629
- gt_bboxes,
630
- img_metas,
631
- gt_bboxes_ignore_list=gt_bboxes_ignore,
632
- gt_labels_list=gt_labels,
633
- stage='refine',
634
- label_channels=label_channels)
635
- (labels_list, label_weights_list, bbox_gt_list_refine,
636
- candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
637
- num_total_neg_refine) = cls_reg_targets_refine
638
- num_total_samples_refine = (
639
- num_total_pos_refine +
640
- num_total_neg_refine if self.sampling else num_total_pos_refine)
641
-
642
- # compute loss
643
- losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
644
- self.loss_single,
645
- cls_scores,
646
- pts_coordinate_preds_init,
647
- pts_coordinate_preds_refine,
648
- labels_list,
649
- label_weights_list,
650
- bbox_gt_list_init,
651
- bbox_weights_list_init,
652
- bbox_gt_list_refine,
653
- bbox_weights_list_refine,
654
- self.point_strides,
655
- num_total_samples_init=num_total_samples_init,
656
- num_total_samples_refine=num_total_samples_refine)
657
- loss_dict_all = {
658
- 'loss_cls': losses_cls,
659
- 'loss_pts_init': losses_pts_init,
660
- 'loss_pts_refine': losses_pts_refine
661
- }
662
- return loss_dict_all
663
-
664
- def get_bboxes(self,
665
- cls_scores,
666
- pts_preds_init,
667
- pts_preds_refine,
668
- img_metas,
669
- cfg=None,
670
- rescale=False,
671
- with_nms=True):
672
- assert len(cls_scores) == len(pts_preds_refine)
673
- device = cls_scores[0].device
674
- bbox_preds_refine = [
675
- self.points2bbox(pts_pred_refine)
676
- for pts_pred_refine in pts_preds_refine
677
- ]
678
- num_levels = len(cls_scores)
679
- mlvl_points = [
680
- self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
681
- self.point_strides[i], device)
682
- for i in range(num_levels)
683
- ]
684
- result_list = []
685
- for img_id in range(len(img_metas)):
686
- cls_score_list = [
687
- cls_scores[i][img_id].detach() for i in range(num_levels)
688
- ]
689
- bbox_pred_list = [
690
- bbox_preds_refine[i][img_id].detach()
691
- for i in range(num_levels)
692
- ]
693
- img_shape = img_metas[img_id]['img_shape']
694
- scale_factor = img_metas[img_id]['scale_factor']
695
- proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
696
- mlvl_points, img_shape,
697
- scale_factor, cfg, rescale,
698
- with_nms)
699
- result_list.append(proposals)
700
- return result_list
701
-
702
- def _get_bboxes_single(self,
703
- cls_scores,
704
- bbox_preds,
705
- mlvl_points,
706
- img_shape,
707
- scale_factor,
708
- cfg,
709
- rescale=False,
710
- with_nms=True):
711
- cfg = self.test_cfg if cfg is None else cfg
712
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
713
- mlvl_bboxes = []
714
- mlvl_scores = []
715
- for i_lvl, (cls_score, bbox_pred, points) in enumerate(
716
- zip(cls_scores, bbox_preds, mlvl_points)):
717
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
718
- cls_score = cls_score.permute(1, 2,
719
- 0).reshape(-1, self.cls_out_channels)
720
- if self.use_sigmoid_cls:
721
- scores = cls_score.sigmoid()
722
- else:
723
- scores = cls_score.softmax(-1)
724
- bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
725
- nms_pre = cfg.get('nms_pre', -1)
726
- if nms_pre > 0 and scores.shape[0] > nms_pre:
727
- if self.use_sigmoid_cls:
728
- max_scores, _ = scores.max(dim=1)
729
- else:
730
- # remind that we set FG labels to [0, num_class-1]
731
- # since mmdet v2.0
732
- # BG cat_id: num_class
733
- max_scores, _ = scores[:, :-1].max(dim=1)
734
- _, topk_inds = max_scores.topk(nms_pre)
735
- points = points[topk_inds, :]
736
- bbox_pred = bbox_pred[topk_inds, :]
737
- scores = scores[topk_inds, :]
738
- bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
739
- bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
740
- x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
741
- y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
742
- x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
743
- y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
744
- bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
745
- mlvl_bboxes.append(bboxes)
746
- mlvl_scores.append(scores)
747
- mlvl_bboxes = torch.cat(mlvl_bboxes)
748
- if rescale:
749
- mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
750
- mlvl_scores = torch.cat(mlvl_scores)
751
- if self.use_sigmoid_cls:
752
- # Add a dummy background class to the backend when using sigmoid
753
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
754
- # BG cat_id: num_class
755
- padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
756
- mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
757
- if with_nms:
758
- det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
759
- cfg.score_thr, cfg.nms,
760
- cfg.max_per_img)
761
- return det_bboxes, det_labels
762
- else:
763
- return mlvl_bboxes, mlvl_scores
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cambino/dog-classifier-gradio/DogBreedClassifier.py DELETED
@@ -1,39 +0,0 @@
1
- import torchvision
2
- from torch import nn
3
- from torchvision.models import EfficientNet_B0_Weights
4
-
5
-
6
- def freeze_layers(model):
7
- """ Freezes model layers by settings requires_grad to False or True. """
8
- for num_params, param in enumerate(model.parameters()):
9
- if num_params > 140:
10
- param.requires_grad = True
11
- else:
12
- param.requires_grad = False
13
- return model
14
-
15
-
16
- class DogBreedClassifier(nn.Module):
17
- def __init__(self):
18
- super(DogBreedClassifier, self).__init__()
19
- self.resnet = freeze_layers(torchvision.models.efficientnet_b0(weights=EfficientNet_B0_Weights.DEFAULT))
20
-
21
- self.custom_classifier = nn.Sequential(
22
- nn.Linear(1280, 256),
23
- nn.ReLU(),
24
- nn.Dropout(p=0.5),
25
- nn.Linear(256, 120),
26
- )
27
-
28
- # replace classifier layers of pretrained model
29
- self.resnet.classifier = self.custom_classifier
30
-
31
- def forward(self, x):
32
- x = self.resnet(x)
33
- return x
34
-
35
-
36
- if __name__ == '__main__':
37
- model = DogBreedClassifier()
38
-
39
- print(model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Chirag4579/prakalpa-image-comparator/setup.sh DELETED
@@ -1,18 +0,0 @@
1
- mkdir -p ~/.streamlit/
2
- echo "\
3
- [general]\n\
4
- email = \"[email protected]\"\n\
5
- " > ~/.streamlit/credentials.toml
6
- echo "[global]
7
- showWarningOnDirectExecution = false
8
- [theme]
9
- primaryColor = '#f21111'
10
- backgroundColor='#0e1117'
11
- secondaryBackgroundColor='#31333F'
12
- textColor='#fafafa'
13
- font='sans serif'
14
- [server]\n\
15
- headless = true\n\
16
- enableCORS=false\n\
17
- port = $PORT\n\
18
- " > ~/.streamlit/config.toml
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/Yunzai/Yunzai/lib/events/online.js DELETED
@@ -1,18 +0,0 @@
1
- import EventListener from '../listener/listener.js'
2
- import cfg from '../config/config.js'
3
-
4
- /**
5
- * 监听上线事件
6
- */
7
- export default class onlineEvent extends EventListener {
8
- constructor () {
9
- super({
10
- event: 'online',
11
- once: true
12
- })
13
- }
14
-
15
- async execute () {
16
- logger.mark('----^_^----')
17
- }
18
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Crow34/Joi/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Joi
3
- emoji: 🏢
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.27.0
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DGSpitzer/TXT-2-IMG-2-MUSIC-2-VIDEO-w-RIFFUSION/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Txt 2 Img 2 Music 2 Video w Riffusion
3
- emoji: ☯️🎨🎸🎞
4
- colorFrom: yellow
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.8.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: DGSpitzer/TXT-2-IMG-2-MUSIC-2-VIDEO
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/V_D_M_X_.py DELETED
@@ -1,241 +0,0 @@
1
- from . import DefaultTable
2
- from fontTools.misc import sstruct
3
- from fontTools.misc.textTools import safeEval
4
- import struct
5
-
6
- VDMX_HeaderFmt = """
7
- > # big endian
8
- version: H # Version number (0 or 1)
9
- numRecs: H # Number of VDMX groups present
10
- numRatios: H # Number of aspect ratio groupings
11
- """
12
- # the VMDX header is followed by an array of RatRange[numRatios] (i.e. aspect
13
- # ratio ranges);
14
- VDMX_RatRangeFmt = """
15
- > # big endian
16
- bCharSet: B # Character set
17
- xRatio: B # Value to use for x-Ratio
18
- yStartRatio: B # Starting y-Ratio value
19
- yEndRatio: B # Ending y-Ratio value
20
- """
21
- # followed by an array of offset[numRatios] from start of VDMX table to the
22
- # VDMX Group for this ratio range (offsets will be re-calculated on compile);
23
- # followed by an array of Group[numRecs] records;
24
- VDMX_GroupFmt = """
25
- > # big endian
26
- recs: H # Number of height records in this group
27
- startsz: B # Starting yPelHeight
28
- endsz: B # Ending yPelHeight
29
- """
30
- # followed by an array of vTable[recs] records.
31
- VDMX_vTableFmt = """
32
- > # big endian
33
- yPelHeight: H # yPelHeight to which values apply
34
- yMax: h # Maximum value (in pels) for this yPelHeight
35
- yMin: h # Minimum value (in pels) for this yPelHeight
36
- """
37
-
38
-
39
- class table_V_D_M_X_(DefaultTable.DefaultTable):
40
- def decompile(self, data, ttFont):
41
- pos = 0 # track current position from to start of VDMX table
42
- dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
43
- pos += sstruct.calcsize(VDMX_HeaderFmt)
44
- self.ratRanges = []
45
- for i in range(self.numRatios):
46
- ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
47
- pos += sstruct.calcsize(VDMX_RatRangeFmt)
48
- # the mapping between a ratio and a group is defined further below
49
- ratio["groupIndex"] = None
50
- self.ratRanges.append(ratio)
51
- lenOffset = struct.calcsize(">H")
52
- _offsets = [] # temporarily store offsets to groups
53
- for i in range(self.numRatios):
54
- offset = struct.unpack(">H", data[0:lenOffset])[0]
55
- data = data[lenOffset:]
56
- pos += lenOffset
57
- _offsets.append(offset)
58
- self.groups = []
59
- for groupIndex in range(self.numRecs):
60
- # the offset to this group from beginning of the VDMX table
61
- currOffset = pos
62
- group, data = sstruct.unpack2(VDMX_GroupFmt, data)
63
- # the group lenght and bounding sizes are re-calculated on compile
64
- recs = group.pop("recs")
65
- startsz = group.pop("startsz")
66
- endsz = group.pop("endsz")
67
- pos += sstruct.calcsize(VDMX_GroupFmt)
68
- for j in range(recs):
69
- vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
70
- vTableLength = sstruct.calcsize(VDMX_vTableFmt)
71
- pos += vTableLength
72
- # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
73
- group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
74
- # make sure startsz and endsz match the calculated values
75
- minSize = min(group.keys())
76
- maxSize = max(group.keys())
77
- assert (
78
- startsz == minSize
79
- ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
80
- group.startsz,
81
- minSize,
82
- groupIndex,
83
- )
84
- assert (
85
- endsz == maxSize
86
- ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
87
- group.endsz,
88
- maxSize,
89
- groupIndex,
90
- )
91
- self.groups.append(group)
92
- # match the defined offsets with the current group's offset
93
- for offsetIndex, offsetValue in enumerate(_offsets):
94
- # when numRecs < numRatios there can more than one ratio range
95
- # sharing the same VDMX group
96
- if currOffset == offsetValue:
97
- # map the group with the ratio range thas has the same
98
- # index as the offset to that group (it took me a while..)
99
- self.ratRanges[offsetIndex]["groupIndex"] = groupIndex
100
- # check that all ratio ranges have a group
101
- for i in range(self.numRatios):
102
- ratio = self.ratRanges[i]
103
- if ratio["groupIndex"] is None:
104
- from fontTools import ttLib
105
-
106
- raise ttLib.TTLibError("no group defined for ratRange %d" % i)
107
-
108
- def _getOffsets(self):
109
- """
110
- Calculate offsets to VDMX_Group records.
111
- For each ratRange return a list of offset values from the beginning of
112
- the VDMX table to a VDMX_Group.
113
- """
114
- lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
115
- lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
116
- lenOffset = struct.calcsize(">H")
117
- lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
118
- lenVTable = sstruct.calcsize(VDMX_vTableFmt)
119
- # offset to the first group
120
- pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset
121
- groupOffsets = []
122
- for group in self.groups:
123
- groupOffsets.append(pos)
124
- lenGroup = lenGroupHeader + len(group) * lenVTable
125
- pos += lenGroup # offset to next group
126
- offsets = []
127
- for ratio in self.ratRanges:
128
- groupIndex = ratio["groupIndex"]
129
- offsets.append(groupOffsets[groupIndex])
130
- return offsets
131
-
132
- def compile(self, ttFont):
133
- if not (self.version == 0 or self.version == 1):
134
- from fontTools import ttLib
135
-
136
- raise ttLib.TTLibError(
137
- "unknown format for VDMX table: version %s" % self.version
138
- )
139
- data = sstruct.pack(VDMX_HeaderFmt, self)
140
- for ratio in self.ratRanges:
141
- data += sstruct.pack(VDMX_RatRangeFmt, ratio)
142
- # recalculate offsets to VDMX groups
143
- for offset in self._getOffsets():
144
- data += struct.pack(">H", offset)
145
- for group in self.groups:
146
- recs = len(group)
147
- startsz = min(group.keys())
148
- endsz = max(group.keys())
149
- gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz}
150
- data += sstruct.pack(VDMX_GroupFmt, gHeader)
151
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
152
- vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin}
153
- data += sstruct.pack(VDMX_vTableFmt, vTable)
154
- return data
155
-
156
- def toXML(self, writer, ttFont):
157
- writer.simpletag("version", value=self.version)
158
- writer.newline()
159
- writer.begintag("ratRanges")
160
- writer.newline()
161
- for ratio in self.ratRanges:
162
- groupIndex = ratio["groupIndex"]
163
- writer.simpletag(
164
- "ratRange",
165
- bCharSet=ratio["bCharSet"],
166
- xRatio=ratio["xRatio"],
167
- yStartRatio=ratio["yStartRatio"],
168
- yEndRatio=ratio["yEndRatio"],
169
- groupIndex=groupIndex,
170
- )
171
- writer.newline()
172
- writer.endtag("ratRanges")
173
- writer.newline()
174
- writer.begintag("groups")
175
- writer.newline()
176
- for groupIndex in range(self.numRecs):
177
- group = self.groups[groupIndex]
178
- recs = len(group)
179
- startsz = min(group.keys())
180
- endsz = max(group.keys())
181
- writer.begintag("group", index=groupIndex)
182
- writer.newline()
183
- writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz))
184
- writer.newline()
185
- for yPelHeight, (yMax, yMin) in sorted(group.items()):
186
- writer.simpletag(
187
- "record",
188
- [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)],
189
- )
190
- writer.newline()
191
- writer.endtag("group")
192
- writer.newline()
193
- writer.endtag("groups")
194
- writer.newline()
195
-
196
- def fromXML(self, name, attrs, content, ttFont):
197
- if name == "version":
198
- self.version = safeEval(attrs["value"])
199
- elif name == "ratRanges":
200
- if not hasattr(self, "ratRanges"):
201
- self.ratRanges = []
202
- for element in content:
203
- if not isinstance(element, tuple):
204
- continue
205
- name, attrs, content = element
206
- if name == "ratRange":
207
- if not hasattr(self, "numRatios"):
208
- self.numRatios = 1
209
- else:
210
- self.numRatios += 1
211
- ratio = {
212
- "bCharSet": safeEval(attrs["bCharSet"]),
213
- "xRatio": safeEval(attrs["xRatio"]),
214
- "yStartRatio": safeEval(attrs["yStartRatio"]),
215
- "yEndRatio": safeEval(attrs["yEndRatio"]),
216
- "groupIndex": safeEval(attrs["groupIndex"]),
217
- }
218
- self.ratRanges.append(ratio)
219
- elif name == "groups":
220
- if not hasattr(self, "groups"):
221
- self.groups = []
222
- for element in content:
223
- if not isinstance(element, tuple):
224
- continue
225
- name, attrs, content = element
226
- if name == "group":
227
- if not hasattr(self, "numRecs"):
228
- self.numRecs = 1
229
- else:
230
- self.numRecs += 1
231
- group = {}
232
- for element in content:
233
- if not isinstance(element, tuple):
234
- continue
235
- name, attrs, content = element
236
- if name == "record":
237
- yPelHeight = safeEval(attrs["yPelHeight"])
238
- yMax = safeEval(attrs["yMax"])
239
- yMin = safeEval(attrs["yMin"])
240
- group[yPelHeight] = (yMax, yMin)
241
- self.groups.append(group)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/tests/helpers.py DELETED
@@ -1,101 +0,0 @@
1
- from typing import cast, List, Type, Union, ValuesView
2
-
3
- from .._connection import Connection, NEED_DATA, PAUSED
4
- from .._events import (
5
- ConnectionClosed,
6
- Data,
7
- EndOfMessage,
8
- Event,
9
- InformationalResponse,
10
- Request,
11
- Response,
12
- )
13
- from .._state import CLIENT, CLOSED, DONE, MUST_CLOSE, SERVER
14
- from .._util import Sentinel
15
-
16
- try:
17
- from typing import Literal
18
- except ImportError:
19
- from typing_extensions import Literal # type: ignore
20
-
21
-
22
- def get_all_events(conn: Connection) -> List[Event]:
23
- got_events = []
24
- while True:
25
- event = conn.next_event()
26
- if event in (NEED_DATA, PAUSED):
27
- break
28
- event = cast(Event, event)
29
- got_events.append(event)
30
- if type(event) is ConnectionClosed:
31
- break
32
- return got_events
33
-
34
-
35
- def receive_and_get(conn: Connection, data: bytes) -> List[Event]:
36
- conn.receive_data(data)
37
- return get_all_events(conn)
38
-
39
-
40
- # Merges adjacent Data events, converts payloads to bytestrings, and removes
41
- # chunk boundaries.
42
- def normalize_data_events(in_events: List[Event]) -> List[Event]:
43
- out_events: List[Event] = []
44
- for event in in_events:
45
- if type(event) is Data:
46
- event = Data(data=bytes(event.data), chunk_start=False, chunk_end=False)
47
- if out_events and type(out_events[-1]) is type(event) is Data:
48
- out_events[-1] = Data(
49
- data=out_events[-1].data + event.data,
50
- chunk_start=out_events[-1].chunk_start,
51
- chunk_end=out_events[-1].chunk_end,
52
- )
53
- else:
54
- out_events.append(event)
55
- return out_events
56
-
57
-
58
- # Given that we want to write tests that push some events through a Connection
59
- # and check that its state updates appropriately... we might as make a habit
60
- # of pushing them through two Connections with a fake network link in
61
- # between.
62
- class ConnectionPair:
63
- def __init__(self) -> None:
64
- self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)}
65
- self.other = {CLIENT: SERVER, SERVER: CLIENT}
66
-
67
- @property
68
- def conns(self) -> ValuesView[Connection]:
69
- return self.conn.values()
70
-
71
- # expect="match" if expect=send_events; expect=[...] to say what expected
72
- def send(
73
- self,
74
- role: Type[Sentinel],
75
- send_events: Union[List[Event], Event],
76
- expect: Union[List[Event], Event, Literal["match"]] = "match",
77
- ) -> bytes:
78
- if not isinstance(send_events, list):
79
- send_events = [send_events]
80
- data = b""
81
- closed = False
82
- for send_event in send_events:
83
- new_data = self.conn[role].send(send_event)
84
- if new_data is None:
85
- closed = True
86
- else:
87
- data += new_data
88
- # send uses b"" to mean b"", and None to mean closed
89
- # receive uses b"" to mean closed, and None to mean "try again"
90
- # so we have to translate between the two conventions
91
- if data:
92
- self.conn[self.other[role]].receive_data(data)
93
- if closed:
94
- self.conn[self.other[role]].receive_data(b"")
95
- got_events = get_all_events(self.conn[self.other[role]])
96
- if expect == "match":
97
- expect = send_events
98
- if not isinstance(expect, list):
99
- expect = [expect]
100
- assert got_events == expect
101
- return data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DaFujaTyping/hf-Chat-ui/README.md DELETED
@@ -1,71 +0,0 @@
1
- ---
2
- title: chat-ui
3
- emoji: 🔥
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- license: apache-2.0
9
- base_path: /chat
10
- app_port: 3000
11
- duplicated_from: huggingchat/chat-ui
12
- ---
13
-
14
- # Chat UI
15
-
16
- A chat interface using open source models, eg OpenAssistant.
17
-
18
- ## Launch
19
-
20
- ```bash
21
- npm install
22
- npm run dev
23
- ```
24
-
25
- ## Environment
26
-
27
- Default configuration is in `.env`. Put custom config and secrets in `.env.local`, it will override the values in `.env`.
28
-
29
- Check out [.env](./.env) to see what needs to be set.
30
-
31
- Basically you need to create a `.env.local` with the following contents:
32
-
33
- ```
34
- MONGODB_URL=<url to mongo, for example a free MongoDB Atlas sandbox instance>
35
- HF_ACCESS_TOKEN=<your HF access token from https://huggingface.co/settings/tokens>
36
- ```
37
-
38
- ## Duplicating to a Space
39
-
40
- Create a `DOTENV_LOCAL` secret to your space with the following contents:
41
-
42
- ```
43
- MONGODB_URL=<url to mongo, for example a free MongoDB Atlas sandbox instance>
44
- HF_ACCESS_TOKEN=<your HF access token from https://huggingface.co/settings/tokens>
45
- ```
46
-
47
- Where the contents in `<...>` are replaced by the MongoDB URL and your [HF Access Token](https://huggingface.co/settings/tokens).
48
-
49
- ## Running Local Inference
50
-
51
- Both the example above use the HF Inference API or HF Endpoints API.
52
-
53
- If you want to run the model locally, you need to run this inference server locally: https://github.com/huggingface/text-generation-inference
54
-
55
- And add this to your `.env.local`:
56
-
57
- ```
58
- MODELS=`[{"name": "...", "endpoints": [{"url": "127.0.0.1:8080/generate_stream"}]}]`
59
- ```
60
-
61
- ## Building
62
-
63
- To create a production version of your app:
64
-
65
- ```bash
66
- npm run build
67
- ```
68
-
69
- You can preview the production build with `npm run preview`.
70
-
71
- > To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DaFujaTyping/hf-Chat-ui/src/styles/highlight-js.css DELETED
@@ -1 +0,0 @@
1
- @import "highlight.js/styles/atom-one-dark";
 
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/swin_transformer.py DELETED
@@ -1,43 +0,0 @@
1
- from models.modules.transformer_modules import *
2
-
3
-
4
- class Swin_Transformer(nn.Module):
5
- def __init__(self, dim, depth, heads, win_size, dim_head, mlp_dim,
6
- dropout=0., patch_num=None, ape=None, rpe=None, rpe_pos=1):
7
- super().__init__()
8
- self.absolute_pos_embed = None if patch_num is None or ape is None else AbsolutePosition(dim, dropout,
9
- patch_num, ape)
10
- self.pos_dropout = nn.Dropout(dropout)
11
- self.layers = nn.ModuleList([])
12
- for i in range(depth):
13
- self.layers.append(nn.ModuleList([
14
- PreNorm(dim, WinAttention(dim, win_size=win_size, shift=0 if (i % 2 == 0) else win_size // 2,
15
- heads=heads, dim_head=dim_head, dropout=dropout, rpe=rpe, rpe_pos=rpe_pos)),
16
- PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)),
17
- ]))
18
-
19
- def forward(self, x):
20
- if self.absolute_pos_embed is not None:
21
- x = self.absolute_pos_embed(x)
22
- x = self.pos_dropout(x)
23
- for attn, ff in self.layers:
24
- x = attn(x) + x
25
- x = ff(x) + x
26
- return x
27
-
28
-
29
- if __name__ == '__main__':
30
- token_dim = 1024
31
- toke_len = 256
32
-
33
- transformer = Swin_Transformer(dim=token_dim,
34
- depth=6,
35
- heads=16,
36
- win_size=8,
37
- dim_head=64,
38
- mlp_dim=2048,
39
- dropout=0.1)
40
-
41
- input = torch.randn(1, toke_len, token_dim)
42
- output = transformer(input)
43
- print(output.shape)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Detomo/aisatsu-app-api/app.py DELETED
@@ -1,52 +0,0 @@
1
- from gtts import gTTS
2
- from io import BytesIO
3
- import base64
4
- from PIL import Image
5
- import cv2
6
- import numpy as np
7
- import gradio as gr
8
- from ultralyticsplus import YOLO
9
- from base64 import b64encode
10
- from speech_recognition import AudioFile, Recognizer
11
- import numpy as np
12
- from utils import tts, read_image_file, pil_to_base64, base64_to_pil, get_hist
13
- from scipy.spatial import distance as dist
14
-
15
- model = YOLO('ultralyticsplus/yolov8s')
16
- CLASS = model.model.names
17
- defaul_bot_voice = "おはいようございます"
18
- area_thres = 0.3
19
-
20
- def infer(image, last_seen):
21
- results = model.predict(image, show=False)[0]
22
- masks, boxes = results.masks, results.boxes
23
- area_image = image.width * image.height
24
- voice_bot = None
25
- most_close = 0
26
- out_img = None
27
- diff_value = 0.5
28
- if boxes is not None:
29
- for xyxy, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
30
- if int(cls) != 0:
31
- continue
32
- box = xyxy.tolist()
33
- area_rate = (box[2] - box[0]) * (box[3] - box[1]) / area_image
34
- if area_rate >= most_close:
35
- out_img = image.crop(tuple(box)).resize((64, 64))
36
- most_close = area_rate
37
- if last_seen != "":
38
- last_seen = base64_to_pil(last_seen)
39
- if out_img is not None:
40
- diff_value = dist.euclidean(get_hist(out_img), get_hist(last_seen))
41
- print(most_close, diff_value)
42
- if most_close >= area_thres and diff_value >= 0.5:
43
- voice_bot = tts(defaul_bot_voice, language="ja")
44
- return out_img, voice_bot
45
-
46
- iface = gr.Interface(
47
- fn=infer,
48
- title="aisatsu api",
49
- inputs=[gr.Image(label="image", type="pil", shape=(320, 320)), gr.Textbox(label="last seen", value="")],
50
- outputs=[gr.Image(label="output image"), gr.Textbox(label="output voice")],
51
- article = "Author: <a href=\"https://huggingface.co/vumichien\">Vu Minh Chien</a>.",
52
- ).launch(enable_queue=True, debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/kalmanFilter.cpp DELETED
@@ -1,152 +0,0 @@
1
- #include "kalmanFilter.h"
2
- #include <Eigen/Cholesky>
3
-
4
- namespace byte_kalman
5
- {
6
- const double KalmanFilter::chi2inv95[10] = {
7
- 0,
8
- 3.8415,
9
- 5.9915,
10
- 7.8147,
11
- 9.4877,
12
- 11.070,
13
- 12.592,
14
- 14.067,
15
- 15.507,
16
- 16.919
17
- };
18
- KalmanFilter::KalmanFilter()
19
- {
20
- int ndim = 4;
21
- double dt = 1.;
22
-
23
- _motion_mat = Eigen::MatrixXf::Identity(8, 8);
24
- for (int i = 0; i < ndim; i++) {
25
- _motion_mat(i, ndim + i) = dt;
26
- }
27
- _update_mat = Eigen::MatrixXf::Identity(4, 8);
28
-
29
- this->_std_weight_position = 1. / 20;
30
- this->_std_weight_velocity = 1. / 160;
31
- }
32
-
33
- KAL_DATA KalmanFilter::initiate(const DETECTBOX &measurement)
34
- {
35
- DETECTBOX mean_pos = measurement;
36
- DETECTBOX mean_vel;
37
- for (int i = 0; i < 4; i++) mean_vel(i) = 0;
38
-
39
- KAL_MEAN mean;
40
- for (int i = 0; i < 8; i++) {
41
- if (i < 4) mean(i) = mean_pos(i);
42
- else mean(i) = mean_vel(i - 4);
43
- }
44
-
45
- KAL_MEAN std;
46
- std(0) = 2 * _std_weight_position * measurement[3];
47
- std(1) = 2 * _std_weight_position * measurement[3];
48
- std(2) = 1e-2;
49
- std(3) = 2 * _std_weight_position * measurement[3];
50
- std(4) = 10 * _std_weight_velocity * measurement[3];
51
- std(5) = 10 * _std_weight_velocity * measurement[3];
52
- std(6) = 1e-5;
53
- std(7) = 10 * _std_weight_velocity * measurement[3];
54
-
55
- KAL_MEAN tmp = std.array().square();
56
- KAL_COVA var = tmp.asDiagonal();
57
- return std::make_pair(mean, var);
58
- }
59
-
60
- void KalmanFilter::predict(KAL_MEAN &mean, KAL_COVA &covariance)
61
- {
62
- //revise the data;
63
- DETECTBOX std_pos;
64
- std_pos << _std_weight_position * mean(3),
65
- _std_weight_position * mean(3),
66
- 1e-2,
67
- _std_weight_position * mean(3);
68
- DETECTBOX std_vel;
69
- std_vel << _std_weight_velocity * mean(3),
70
- _std_weight_velocity * mean(3),
71
- 1e-5,
72
- _std_weight_velocity * mean(3);
73
- KAL_MEAN tmp;
74
- tmp.block<1, 4>(0, 0) = std_pos;
75
- tmp.block<1, 4>(0, 4) = std_vel;
76
- tmp = tmp.array().square();
77
- KAL_COVA motion_cov = tmp.asDiagonal();
78
- KAL_MEAN mean1 = this->_motion_mat * mean.transpose();
79
- KAL_COVA covariance1 = this->_motion_mat * covariance *(_motion_mat.transpose());
80
- covariance1 += motion_cov;
81
-
82
- mean = mean1;
83
- covariance = covariance1;
84
- }
85
-
86
- KAL_HDATA KalmanFilter::project(const KAL_MEAN &mean, const KAL_COVA &covariance)
87
- {
88
- DETECTBOX std;
89
- std << _std_weight_position * mean(3), _std_weight_position * mean(3),
90
- 1e-1, _std_weight_position * mean(3);
91
- KAL_HMEAN mean1 = _update_mat * mean.transpose();
92
- KAL_HCOVA covariance1 = _update_mat * covariance * (_update_mat.transpose());
93
- Eigen::Matrix<float, 4, 4> diag = std.asDiagonal();
94
- diag = diag.array().square().matrix();
95
- covariance1 += diag;
96
- // covariance1.diagonal() << diag;
97
- return std::make_pair(mean1, covariance1);
98
- }
99
-
100
- KAL_DATA
101
- KalmanFilter::update(
102
- const KAL_MEAN &mean,
103
- const KAL_COVA &covariance,
104
- const DETECTBOX &measurement)
105
- {
106
- KAL_HDATA pa = project(mean, covariance);
107
- KAL_HMEAN projected_mean = pa.first;
108
- KAL_HCOVA projected_cov = pa.second;
109
-
110
- //chol_factor, lower =
111
- //scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
112
- //kalmain_gain =
113
- //scipy.linalg.cho_solve((cho_factor, lower),
114
- //np.dot(covariance, self._upadte_mat.T).T,
115
- //check_finite=False).T
116
- Eigen::Matrix<float, 4, 8> B = (covariance * (_update_mat.transpose())).transpose();
117
- Eigen::Matrix<float, 8, 4> kalman_gain = (projected_cov.llt().solve(B)).transpose(); // eg.8x4
118
- Eigen::Matrix<float, 1, 4> innovation = measurement - projected_mean; //eg.1x4
119
- auto tmp = innovation * (kalman_gain.transpose());
120
- KAL_MEAN new_mean = (mean.array() + tmp.array()).matrix();
121
- KAL_COVA new_covariance = covariance - kalman_gain * projected_cov*(kalman_gain.transpose());
122
- return std::make_pair(new_mean, new_covariance);
123
- }
124
-
125
- Eigen::Matrix<float, 1, -1>
126
- KalmanFilter::gating_distance(
127
- const KAL_MEAN &mean,
128
- const KAL_COVA &covariance,
129
- const std::vector<DETECTBOX> &measurements,
130
- bool only_position)
131
- {
132
- KAL_HDATA pa = this->project(mean, covariance);
133
- if (only_position) {
134
- printf("not implement!");
135
- exit(0);
136
- }
137
- KAL_HMEAN mean1 = pa.first;
138
- KAL_HCOVA covariance1 = pa.second;
139
-
140
- // Eigen::Matrix<float, -1, 4, Eigen::RowMajor> d(size, 4);
141
- DETECTBOXSS d(measurements.size(), 4);
142
- int pos = 0;
143
- for (DETECTBOX box : measurements) {
144
- d.row(pos++) = box - mean1;
145
- }
146
- Eigen::Matrix<float, -1, -1, Eigen::RowMajor> factor = covariance1.llt().matrixL();
147
- Eigen::Matrix<float, -1, -1> z = factor.triangularView<Eigen::Lower>().solve<Eigen::OnTheRight>(d).transpose();
148
- auto zz = ((z.array())*(z.array())).matrix();
149
- auto square_maha = zz.colwise().sum();
150
- return square_maha;
151
- }
152
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ElAnon/6btest/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: 6btest
3
- emoji: 📈
4
- colorFrom: green
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.4.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Eroggen/ChatGPT4/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Chat-with-GPT4
3
- emoji: 🚀
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.21.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: ysharma/ChatGPT4
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/EsoCode/text-generation-webui/modules/logging_colors.py DELETED
@@ -1,117 +0,0 @@
1
- # Copied from https://stackoverflow.com/a/1336640
2
-
3
- import logging
4
- import platform
5
-
6
- logging.basicConfig(
7
- format='%(asctime)s %(levelname)s:%(message)s',
8
- datefmt='%Y-%m-%d %H:%M:%S',
9
- )
10
-
11
-
12
- def add_coloring_to_emit_windows(fn):
13
- # add methods we need to the class
14
- def _out_handle(self):
15
- import ctypes
16
- return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
17
- out_handle = property(_out_handle)
18
-
19
- def _set_color(self, code):
20
- import ctypes
21
-
22
- # Constants from the Windows API
23
- self.STD_OUTPUT_HANDLE = -11
24
- hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
25
- ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
26
-
27
- setattr(logging.StreamHandler, '_set_color', _set_color)
28
-
29
- def new(*args):
30
- FOREGROUND_BLUE = 0x0001 # text color contains blue.
31
- FOREGROUND_GREEN = 0x0002 # text color contains green.
32
- FOREGROUND_RED = 0x0004 # text color contains red.
33
- FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
34
- FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
35
- # winbase.h
36
- # STD_INPUT_HANDLE = -10
37
- # STD_OUTPUT_HANDLE = -11
38
- # STD_ERROR_HANDLE = -12
39
-
40
- # wincon.h
41
- # FOREGROUND_BLACK = 0x0000
42
- FOREGROUND_BLUE = 0x0001
43
- FOREGROUND_GREEN = 0x0002
44
- # FOREGROUND_CYAN = 0x0003
45
- FOREGROUND_RED = 0x0004
46
- FOREGROUND_MAGENTA = 0x0005
47
- FOREGROUND_YELLOW = 0x0006
48
- # FOREGROUND_GREY = 0x0007
49
- FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
50
-
51
- # BACKGROUND_BLACK = 0x0000
52
- # BACKGROUND_BLUE = 0x0010
53
- # BACKGROUND_GREEN = 0x0020
54
- # BACKGROUND_CYAN = 0x0030
55
- # BACKGROUND_RED = 0x0040
56
- # BACKGROUND_MAGENTA = 0x0050
57
- BACKGROUND_YELLOW = 0x0060
58
- # BACKGROUND_GREY = 0x0070
59
- BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
60
-
61
- levelno = args[1].levelno
62
- if (levelno >= 50):
63
- color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
64
- elif (levelno >= 40):
65
- color = FOREGROUND_RED | FOREGROUND_INTENSITY
66
- elif (levelno >= 30):
67
- color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
68
- elif (levelno >= 20):
69
- color = FOREGROUND_GREEN
70
- elif (levelno >= 10):
71
- color = FOREGROUND_MAGENTA
72
- else:
73
- color = FOREGROUND_WHITE
74
- args[0]._set_color(color)
75
-
76
- ret = fn(*args)
77
- args[0]._set_color(FOREGROUND_WHITE)
78
- # print "after"
79
- return ret
80
- return new
81
-
82
-
83
- def add_coloring_to_emit_ansi(fn):
84
- # add methods we need to the class
85
- def new(*args):
86
- levelno = args[1].levelno
87
- if (levelno >= 50):
88
- color = '\x1b[31m' # red
89
- elif (levelno >= 40):
90
- color = '\x1b[31m' # red
91
- elif (levelno >= 30):
92
- color = '\x1b[33m' # yellow
93
- elif (levelno >= 20):
94
- color = '\x1b[32m' # green
95
- elif (levelno >= 10):
96
- color = '\x1b[35m' # pink
97
- else:
98
- color = '\x1b[0m' # normal
99
- args[1].msg = color + args[1].msg + '\x1b[0m' # normal
100
- # print "after"
101
- return fn(*args)
102
- return new
103
-
104
-
105
- if platform.system() == 'Windows':
106
- # Windows does not support ANSI escapes and we are using API calls to set the console color
107
- logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
108
- else:
109
- # all non-Windows platforms are supporting ANSI escapes so we use them
110
- logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
111
- # log = logging.getLogger()
112
- # log.addFilter(log_filter())
113
- # //hdlr = logging.StreamHandler()
114
- # //hdlr.setFormatter(formatter())
115
-
116
- logger = logging.getLogger('text-generation-webui')
117
- logger.setLevel(logging.DEBUG)