parquet-converter committed
Commit 0838844 · 1 Parent(s): 187401b

Update parquet files (step 47 of 296)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch ESI Tronic Keygen 1Q.2013.rar The Best Way to Get Bosch ESI Tronic Software for Free.md +0 -65
  2. spaces/1gistliPinn/ChatGPT4/Examples/3ds Max 2012 Crack Free Xforce 64.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Archicad 14 Crack ((FULL)) Patch Francais Gratuit.md +0 -8
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Sim 2 A Blast to Play with BlueStacks Emulator.md +0 -96
  5. spaces/1phancelerku/anime-remove-background/Criminal Case Travel in Time How to Get Unlimited Stars and Energy with MOD APK.md +0 -137
  6. spaces/1phancelerku/anime-remove-background/FIFA World Cup 2022 in FIFA Mobile Mod APK - Enjoy Authentic Stadiums and Commentary.md +0 -116
  7. spaces/232labs/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp +0 -31
  8. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/timm_model.py +0 -106
  9. spaces/AIGuardians/SummarizeWikipediaDocument/tester.py +0 -21
  10. spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/openaimodel.py +0 -798
  11. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/Reset.js +0 -16
  12. spaces/Ahmedmewloud/Depplearnig/traduction.py +0 -713
  13. spaces/Aityz/Aityz_Model_Eli5/README.md +0 -13
  14. spaces/Aityz/Aityz_Model_Eli5/app.py +0 -20
  15. spaces/AlexWang/lama/bin/paper_runfiles/generate_val_test.sh +0 -28
  16. spaces/Alican/pixera/options/base_options.py +0 -141
  17. spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/data_utils.py +0 -37
  18. spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py +0 -16
  19. spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py +0 -4
  20. spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_3x_ms_hybrid_base/config.py +0 -82
  21. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py +0 -41
  22. spaces/AnnonSubmission/xai-cl/ssl_models/simclr2.py +0 -214
  23. spaces/Annotation-AI/segment-similarthings/app.py +0 -17
  24. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/options/base_options.py +0 -134
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py +0 -28
  26. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/datasets/cocogrounding_eval.py +0 -269
  27. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/lazyconfig_train_net.py +0 -131
  28. spaces/Benson/text-generation/Examples/Descargar Ganador Eleven 2020 Apk.md +0 -67
  29. spaces/Bonp/B/Dockerfile +0 -21
  30. spaces/BrunoHempel775/Byzu/README.md +0 -13
  31. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/detection_checkpoint.py +0 -59
  32. spaces/CVPR/LIVE/thrust/thrust/async/for_each.h +0 -119
  33. spaces/CVPR/LIVE/thrust/thrust/detail/temporary_array.h +0 -181
  34. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/per_device_resource.h +0 -47
  35. spaces/CVPR/lama-example/bin/filter_sharded_dataset.py +0 -69
  36. spaces/CVPR/regionclip-demo/detectron2/config/compat.py +0 -229
  37. spaces/CVPR/v-doc_abstractive_mac/README.md +0 -11
  38. spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/wiki_process.py +0 -46
  39. spaces/Choisuren/AnimeGANv3/app.py +0 -67
  40. spaces/ClassCat/Medical-Image-Classification-with-MONAI/app.py +0 -68
  41. spaces/CofAI/chat.b4/client/css/checkbox.css +0 -59
  42. spaces/Cong723/gpt-academic-public/crazy_functions/解析JupyterNotebook.py +0 -145
  43. spaces/Cong723/gpt-academic-public/docs/README_JP.md +0 -302
  44. spaces/CorvaeOboro/gen_ability_icon/torch_utils/ops/bias_act.cpp +0 -99
  45. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/vit.py +0 -491
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_fixes.py +0 -77
  47. spaces/DeclK/pose/main.py +0 -96
  48. spaces/Demosthene-OR/avr23-cds-translation/style.css +0 -113
  49. spaces/Detomo/ai-avatar-frontend/src/converter.js +0 -97
  50. spaces/Dogge/bigscience-bloomz-7b1/app.py +0 -3
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch ESI Tronic Keygen 1Q.2013.rar The Best Way to Get Bosch ESI Tronic Software for Free.md DELETED
@@ -1,65 +0,0 @@
- <br />
- <h1>Bosch ESI Tronic Keygen 1Q.2013.rar: What Is It and How to Use It?</h1>
- <p>If you are a professional mechanic or a car enthusiast, you probably know about Bosch ESI Tronic, a comprehensive software for diagnosing, repairing, and servicing various vehicles. But do you know how to get a valid license for using this software without paying a subscription fee? In this article, we will explain what Bosch ESI Tronic Keygen 1Q.2013.rar is, how to download it, how to generate valid keys with it, and how to use it with Bosch ESI Tronic.</p>
- <h2>Introduction</h2>
- <p>Bosch ESI Tronic is a software that provides access to a database of technical information, wiring diagrams, service manuals, troubleshooting guides, spare parts catalogs, and more for over 150,000 vehicles from different manufacturers. It also allows you to connect your PC or laptop with a compatible diagnostic device, such as Bosch KTS series, and perform various tests and functions on your vehicle's electronic systems.</p>
- <h2>bosch esi tronic keygen 1q.2013.rar</h2><br /><p><b><b>Download Zip</b> &#10002; <a href="https://byltly.com/2uKxXY">https://byltly.com/2uKxXY</a></b></p><br /><br />
- <p>A keygen is a software that generates serial numbers or activation codes for another software. You need a keygen for Bosch ESI Tronic because the original software requires a license that can only be obtained by paying a subscription fee to Bosch or an authorized dealer. By using a keygen, you can bypass this requirement and use Bosch ESI Tronic for free.</p>
- <p>The 1Q.2013 version is one of the latest versions of Bosch ESI Tronic that was released in early 2013. It contains updated information and features for vehicles up to model year 2012. It also includes some archive CDs that contain older data for vehicles from previous years.</p>
- <h2>How to Download Bosch ESI Tronic Keygen 1Q.2013.rar</h2>
- <p>To download Bosch ESI Tronic Keygen 1Q.2013.rar, you need to find a reliable source that provides the link and the password for the file. One such source is MHH AUTO forum, where you can find a thread dedicated to this topic. You need to be an active member of this forum with at least 100 posts to get access to the password.</p>
- <p>Once you have the link and the password, you need to use JDownloader and Winrar to download and extract the file. JDownloader is a free download manager that can handle multiple links and resume interrupted downloads. Winrar is a file compression tool that can open rar files.</p>
- <p>The file size of Bosch ESI Tronic Keygen 1Q.2013.rar is about 33.5 GB, so make sure you have enough space on your hard drive before downloading it. The file also has a 12% recovery record in case of any corruption during the download process.</p>
- <p>After downloading and extracting the file, you will find several files inside it: - **ESI_3Q_2013_DVD_U.iso**: This is the main installation DVD for Bosch ESI Tronic. - **ESI_3Q_2013_DVD_U1.iso**: This is an additional DVD that contains more data for some vehicles. - **ESI_1Q_2013_DVD_1.iso**: This is another installation DVD that contains data for older vehicles. - **ESI_1Q_2013_DVD_2.iso**: This is another installation DVD that contains data for older vehicles. - **ESI_1Q_2013_DVD_3.iso**: This is another installation DVD that contains data for older vehicles. - **ESI_Archive_CD_C.iso**: This is an archive CD that contains data for commercial vehicles from previous years. - **ESI_Archive_CD_K_W.iso**: This is an archive CD that contains data for passenger cars from previous years. - **EIS_3_2013.txt**: This is a text file that contains instructions on how to install Bosch ESI Tronic. - **KG_Patch_1Q_2013.rar**: This is a rar file that contains the keygen and patch for generating valid keys.</p>
- <h2>How to Generate Valid Keys with Bosch ESI Tronic Keygen 1Q.2013.rar</h2>
- <p>To generate valid keys with Bosch ESI Tronic Keygen 1Q.2013.rar, you need to follow these steps: - Extract KG_Patch_1Q_2013.rar with Winrar and open the folder inside it. - Run "ESI KG v10.exe" as administrator. - Enter your hardware ID (HW-ID) in the first box. You can find your HW-ID by running "HW-ID.exe" in the same folder or by opening "ESI[tronic]-Info.exe" in your installed folder of Bosch ESI Tronic. - Select your options in the second box according to your needs. You can choose between "All Modules" or "Only Diagnosis", "All Languages" or "Only English", "All Brands" or "Only One Brand", etc. - Click on "Generate" button and copy the generated keys in the third box. - Run "Patch.exe" as administrator and click on "Patch" button. - Paste the generated keys into the appropriate fields in your installed folder of Bosch ESI Tronic.</p>
- <p>bosch esi tronic 2013 activation code generator<br />
- bosch esi tronic 1q 2013 crack download<br />
- bosch esi tronic keygen 1q 2013 free download<br />
- bosch esi tronic 2013 full version rar<br />
- bosch esi tronic keygen 1q 2013 password<br />
- bosch esi tronic 2013 license key<br />
- bosch esi tronic keygen 1q 2013 instructions<br />
- bosch esi tronic 2013 software update<br />
- bosch esi tronic keygen 1q 2013 working<br />
- bosch esi tronic 2013 patch rar<br />
- bosch esi tronic keygen 1q 2013 serial number<br />
- bosch esi tronic 2013 installation guide<br />
- bosch esi tronic keygen 1q 2013 online<br />
- bosch esi tronic 2013 torrent download<br />
- bosch esi tronic keygen 1q 2013 mac<br />
- bosch esi tronic 2013 iso file<br />
- bosch esi tronic keygen 1q 2013 windows<br />
- bosch esi tronic 2013 diagnostic tool<br />
- bosch esi tronic keygen 1q 2013 linux<br />
- bosch esi tronic 2013 database update<br />
- bosch esi tronic keygen 1q 2013 zip file<br />
- bosch esi tronic 2013 activation error<br />
- bosch esi tronic keygen 1q 2013 forum<br />
- bosch esi tronic 2013 manual pdf<br />
- bosch esi tronic keygen 1q 2013 review<br />
- bosch esi tronic 2013 dvd download<br />
- bosch esi tronic keygen 1q 2013 youtube<br />
- bosch esi tronic 2013 price list<br />
- bosch esi tronic keygen 1q 2013 support<br />
- bosch esi tronic 2013 system requirements<br />
- bosch esi tronic keygen 1q.2013.rar mega.nz<br />
- bosch esi tronic keygen generator for all versions.rar password<br />
- how to use bosch esi tronic keygen rar file extractor<br />
- where to buy bosch esi tronic keygen rar file opener online<br />
- what is the difference between bosch esi tronic keygen rar and zip files<br />
- how to fix bosch esi tronic keygen rar file corrupted error message<br />
- how to convert bosch esi tronic keygen rar file to exe file format<br />
- how to open bosch esi tronic keygen rar file on android phone or tablet device<br />
- how to create a backup copy of your bosch esi tronic keygen rar file before using it <br />
- how to share your bosch esi tronic keygen rar file with others via email or cloud storage service</p>
- <h2>How to Use Bosch ESI Tronic with the Valid Keys</h2>
- <p>To use Bosch ESI Tronic with the valid keys, you need to follow these steps: - Launch Bosch ESI Tronic by running "ESI[tronic].exe" in your installed folder. - Select your language from the drop-down menu at the top right corner. - Enter your user name (any name) and click on "OK" button. - Access the different modules and functions of Bosch ESI Tronic by clicking on their icons at the left side panel. You can choose between "Identification", "Diagnosis", "Troubleshooting", "Service Plan", "Mechanical Repair", "Wiring Diagrams", "Technical Data", etc. - Connect your PC or laptop with a compatible diagnostic device (such as Bosch KTS series) via USB cable or Bluetooth adapter. - Select your vehicle make, model, year, engine type, etc from the list or by entering its VIN number or scan code. - Perform various tests and functions on your vehicle's electronic systems by following the instructions on screen.</p>
- <h2>Conclusion</h2>
- <p>Bosch ESI Tronic Keygen 1Q.2013.rar is a useful tool for anyone who wants to use Bosch ESI Tronic software without paying a subscription fee. It allows you to generate valid keys for activating all modules and functions of this comprehensive software for diagnosing, repairing, and servicing various vehicles.</p>
- <p>However, before using this tool, you should be aware of some tips and warnings: - Make sure you download Bosch ESI Tronic Keygen 1Q.2013.rar from a trusted source and scan it for viruses or malware before opening it. - Make sure you have enough space on your hard drive and a stable internet connection before downloading and extracting the file. - Make sure you follow the instructions carefully and enter the correct hardware ID and options when generating the keys. - Make sure you backup your original files before applying the patch and keep a copy of the generated keys in case you need to reinstall the software. - Make sure you use Bosch ESI Tronic Keygen 1Q.2013.rar only for personal or educational purposes and not for commercial or illegal activities. You are responsible for any consequences that may arise from using this tool. - Make sure you update your Bosch ESI Tronic software regularly to get the latest information and features for your vehicles. <h2>FAQs</h2>
- <p>Here are some frequently asked questions and answers about Bosch ESI Tronic Keygen 1Q.2013.rar:</p>
- - Q: What are some common problems and solutions when using Bosch ESI Tronic Keygen 1Q.2013.rar? - A: Some common problems and solutions are: - The keygen does not work or shows an error message. Solution: Try to run the keygen as administrator, disable your antivirus or firewall temporarily, or download a different version of the keygen from another source. - The patch does not work or shows an error message. Solution: Try to run the patch as administrator, disable your antivirus or firewall temporarily, or download a different version of the patch from another source. - The generated keys do not work or show an invalid license message. Solution: Try to generate new keys with different options, make sure you enter the correct hardware ID and paste the keys in the right fields, or contact Bosch for technical support. - The ESI Tronic program does not work or shows an error message. Solution: Try to reinstall the program, update the program to the latest version, check your diagnostic device connection and compatibility, or contact Bosch for technical support. - Q: What are some alternative sources or versions of Bosch ESI Tronic Keygen? - A: Some alternative sources or versions of Bosch ESI Tronic Keygen are: - AutoProfessionals forum: This is another online forum where you can find links and passwords for different versions of Bosch ESI Tronic Keygen, such as 2013/1, 2014/1, 2015/1, etc. - BOSCH ESI[tronic] 2.0: This is a newer version of Bosch ESI Tronic software that has a different interface and features than the previous versions. It also requires a different keygen and activation method than the previous versions. - BOSCH ESI[tronic] Online: This is an online version of Bosch ESI Tronic software that does not require any installation or activation. It allows you to access the database and functions of Bosch ESI Tronic via a web browser. However, it requires a subscription fee and an internet connection to use. - Q: What are some compatible devices and vehicles for using Bosch ESI Tronic? - A: Some compatible devices and vehicles for using Bosch ESI Tronic are: - Devices: Bosch KTS series (such as KTS 520, KTS 540, KTS 570, KTS 650, KTS 670), Bosch DCU series (such as DCU 100, DCU 130), Bosch MTS series (such as MTS 3100, MTS 6513), etc. - Vehicles: Over 150,000 vehicles from different manufacturers (such as Audi, BMW, Ford, Honda, Mercedes-Benz, Toyota, Volkswagen, etc) covering various systems (such as engine, transmission, ABS, airbag, immobilizer, etc). - Q: How often does Bosch release new versions of ESI Tronic and how to update them? - A: Bosch usually releases new versions of ESI Tronic every quarter (four times a year). You can update your ESI Tronic software by downloading and installing the new DVDs or CDs from a trusted source. You may also need to generate new keys with a new keygen for each new version. - Q: How to contact Bosch for technical support or feedback? - A: You can contact Bosch for technical support or feedback by visiting their official website and choosing your region and country. You can also find their contact details on their website or on your diagnostic device manual. <p>I hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading.</p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/3ds Max 2012 Crack Free Xforce 64.md DELETED
@@ -1,6 +0,0 @@
- <h2>3ds Max 2012 crack xforce 64</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://imgfil.com/2uxYXc">https://imgfil.com/2uxYXc</a></b></p><br /><br />
-
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Archicad 14 Crack ((FULL)) Patch Francais Gratuit.md DELETED
@@ -1,8 +0,0 @@
-
- <p>archicads remastered faade design workflow enables architects to design, develop and detail hierarchical curtain wall systems with great freedom using modular patterns. the design happens with a natural graphical input in the most natural design environment in 3d or 2d elevations, while graphisoft archicad 22 keygen ensures the curtain wall system is structurally correct and adheres to local requirements for documenting and listing.</p>
- <p>archicad 21.0.3005 serial number allows you to create objects, arrange them, and create connections between them. you can view your designs from any angle. you can use any object as a guide to create the next. this also allows you to control objects and it is possible to edit your own creations. this version of archicad 21 crack serial key allows you to paint the 3d model on the 2d canvas. it has improved tools and a new terrain workflow.</p>
- <h2>archicad 14 crack patch francais gratuit</h2><br /><p><b><b>Download</b> &#128279; <a href="https://imgfil.com/2uy1nV">https://imgfil.com/2uy1nV</a></b></p><br /><br />
- <p>archicad crack mac supports the automatic conversion of your drawings to bim and vice versa. archicad 21 serial key and the new 4d workflow allow you to directly work on the bim model. this allows you to create, modify, and even delete bim components and insert them into your physical construction without the use of the bim tools.</p>
- <p>archicad 21 crack mac works with the latest version of bim 360. you can import or export your project to the bim 360 platform. you can also import data from other applications, such as autodesk revit, to the bim 360 platform. archicad 21 keygen also allows you to convert all the files that are stored on your hard drive and the local network. this greatly reduces the time it takes to generate the results of your project.</p> 899543212b<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Sim 2 A Blast to Play with BlueStacks Emulator.md DELETED
@@ -1,96 +0,0 @@
-
- <h1>Car Sim 2: The Most Realistic Driving Simulator of 2023</h1>
- <p>Do you love cars and driving? Do you want to experience the thrill of racing, cruising, and exploring a large city in a realistic way? If yes, then you should try Car Sim 2, the most realistic driving simulator of 2023. In this article, we will tell you everything you need to know about this amazing game, its features, and why it is worth playing.</p>
- <h2>What is Car Sim 2?</h2>
- <p>Car Sim 2 is a simulation game developed by Oppana Games FZC LLC that lets you drive over 85 new cars in a 3D open world. You can play online with real players from all over the world or enjoy a single-player mode with missions, quests, and challenges. You can also customize your cars with different colors, wheels, engines, and more.</p>
- <h2>car sim 2</h2><br /><p><b><b>DOWNLOAD</b> &#9999; &#9999; &#9999; <a href="https://urlin.us/2uSZp7">https://urlin.us/2uSZp7</a></b></p><br /><br />
- <h3>How to play Car Sim 2?</h3>
- <p>Playing Car Sim 2 is easy and fun. You can choose between two modes: online or single-player. In both modes, you can drive from a first- or third-person perspective and use interactive elements in the car models such as headlights, radio, turn signals, etc.</p>
- <h4>Online mode</h4>
- <p>In online mode, you can play with real players from all over the world. You can chat with them, race with them, or cruise with them in the city. You can also win currency by winning races or completing quests. You can use this currency to buy new cars or upgrade your existing ones.</p>
- <h4>Single-player mode</h4>
- <p>In single-player mode, you can explore the open world at your own pace. You can drive around the city, visit gas stations, garages, or houses. You can also complete missions in the form of quests, arcade challenges, or races. Some missions will require you to obey the rules of the road while others will involve working for the mob or picking up cab fares.</p>
- <h3> <h3>What are the features of Car Sim 2?</h3>
- <p>Car Sim 2 is not just a game, it is a simulation. It has many features that make it stand out from other driving games. Here are some of them:</p>
- <h4>3D open world</h4>
- <p>Car Sim 2 has a large city map that you can explore freely. The city has different districts, such as downtown, suburbs, industrial, or airport. The city also has a dynamic day-night cycle and weather effects that change the atmosphere and the driving conditions. You can interact with various elements in the city, such as traffic lights, pedestrians, animals, or police.</p>
- <p>car simulator 2 game<br />
- car simulator 2 online<br />
- car simulator 2 download<br />
- car simulator 2 pc<br />
- car simulator 2 mod apk<br />
- car simulator 2 cheats<br />
- car simulator 2 hack<br />
- car simulator 2 codes<br />
- car simulator 2 review<br />
- car simulator 2 gameplay<br />
- car simulator 2 android<br />
- car simulator 2 ios<br />
- car simulator 2 app<br />
- car simulator 2 update<br />
- car simulator 2 multiplayer<br />
- car simulator 2 free<br />
- car simulator 2 best cars<br />
- car simulator 2 tips<br />
- car simulator 2 tricks<br />
- car simulator 2 guide<br />
- car simulator 2 oppana games<br />
- car simulator 2 bluestacks<br />
- car simulator 2 crazy games<br />
- car simulator 2 city driving<br />
- car simulator 2 night mode<br />
- car simulator 2 open world<br />
- car simulator 2 racing<br />
- car simulator 2 missions<br />
- car simulator 2 quests<br />
- car simulator 2 challenges<br />
- car simulator 2 garage<br />
- car simulator 2 house<br />
- car simulator 2 upgrades<br />
- car simulator 2 customization<br />
- car simulator 2 interior view<br />
- car simulator 2 realistic physics<br />
- car simulator 2 sound effects<br />
- car simulator 2 mechanic shop<br />
- car simulator 2 gas station<br />
- car simulator 2 police chase<br />
- car simulator 2 cab fares<br />
- car simulator 2 mob jobs<br />
- car simulator 2 traffic rules<br />
- car simulator 2 beta versions<br />
- car simulator 2 new features<br />
- car simulator 2 comments and feedbacks <br />
- car simulator 2 data privacy and security <br />
- car simulator 2 ratings and reviews <br />
- car simulator 2 system requirements</p>
- <h4>Over 85 new cars</h4>
- <p>Car Sim 2 has a huge variety of cars that you can drive, from sports to classic, from muscle to exotic. Each car has its own characteristics, such as speed, acceleration, handling, or durability. Each car also has its own interior and exterior details, such as dashboard, seats, mirrors, or license plates. You can switch between cars at any time in the game.</p>
- <h4>Realistic physics and sounds</h4>
- <p>Car Sim 2 has realistic driving physics that make you feel like you are driving a real car. You can feel the weight, the inertia, the traction, and the suspension of your car. You can also see the damage and the dirt on your car as you drive. Car Sim 2 also has realistic sound effects that match the engine, the brakes, the tires, and the environment of your car.</p>
- <h4>Car customization and upgrades</h4>
- <p>Car Sim 2 lets you customize your cars with different colors, wheels, engines, and more. You can visit the mechanic in the game and change the appearance and the performance of your car. You can also upgrade your car with new parts and accessories that improve its speed, handling, or durability.</p>
- <h3>Why should you play Car Sim 2?</h3>
- <p>Car Sim 2 is not just a game, it is an experience. It is a game that will make you feel like you are driving a real car in a real city. It is a game that will challenge you, entertain you, and immerse you. Here are some reasons why you should play Car Sim 2:</p>
- <h4>Fun and free-to-play</h4>
- <p>Car Sim 2 is a fun game that will keep you hooked for hours. You can enjoy driving around the city, racing with other players, or completing missions. You can also earn currency by playing the game and use it to buy new cars or upgrade your existing ones. Car Sim 2 is also free-to-play, which means you can download it and play it without spending any money.</p>
- <h4>Challenging and immersive</h4>
- <p>Car Sim 2 is a challenging game that will test your skills and reflexes. You can choose from different difficulty levels and modes that suit your preferences. You can also try different arcade challenges that will make you perform stunts, drifts, or jumps. Car Sim 2 is also an immersive game that will make you feel like you are in the driver's seat of your car.</p>
- <h4>Social and interactive</h4>
- <p>Car Sim 2 is a social game that will let you connect with other players from all over the world. You can chat with them, race with them, or cruise with them in the city. You can also join clubs and compete with other teams for rankings and rewards. Car Sim 2 is also an interactive game that will let you interact with various elements in the city.</p>
- <h2>Conclusion</h2>
- <p>Car Sim 2 is the most realistic driving simulator of 2023. It is a game that will let you drive over 85 new cars in a 3D open world. It is a game that has many features, such as realistic physics, sounds, customization, and upgrades. It is a game that will make you have fun, challenge yourself, and socialize with other players. If you love cars and driving, you should download Car Sim 2 today and start your adventure.</p>
- <h3>FAQs</h3>
- <ul>
- <li><b>Q: How can I download Car Sim 2?</b></li>
- <li>A: You can download Car Sim 2 from Google Play Store or App Store for free.</li>
- <li><b>Q: What are the system requirements for Car Sim 2?</b></li>
- <li>A: You need an Android device with at least 4 GB of RAM and Android version 5.0 or higher or an iOS device with at least 2 GB of RAM and iOS version 10 or higher.</li>
- <li><b>Q: How can I contact the developer of Car Sim 2 <li><b>Q: How can I contact the developer of Car Sim 2?</b></li>
- <li>A: You can contact the developer of Car Sim 2 by sending an email to [email protected] or visiting their website at https://oppanagames.com/.</li>
- <li><b>Q: How can I get more currency in Car Sim 2?</b></li>
- <li>A: You can get more currency in Car Sim 2 by winning races, completing quests, or watching ads. You can also buy currency with real money if you want to support the developer.</li>
- <li><b>Q: How can I join a club in Car Sim 2?</b></li>
- <li>A: You can join a club in Car Sim 2 by tapping on the club icon on the main menu and choosing a club that suits your style and level. You can also create your own club and invite your friends to join.</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Criminal Case Travel in Time How to Get Unlimited Stars and Energy with MOD APK.md DELETED
@@ -1,137 +0,0 @@
- <br />
- <h1>Criminal Case Travel in Time Mod APK: A Guide for Detective Gamers</h1>
- <p>If you love hidden object games, adventure games, and detective games, you might want to check out <strong>Criminal Case Travel in Time</strong>. This game lets you join a team of time traveling detectives who solve murder cases in different historical periods. You can investigate crime scenes, interrogate suspects, analyze evidence, and catch the killers in this captivating game.</p>
- <h2>criminal case travel in time mod apk unlimited stars and energy</h2><br /><p><b><b>Download</b> &#8250; <a href="https://jinyurl.com/2uNNUN">https://jinyurl.com/2uNNUN</a></b></p><br /><br />
- <p>But what if you want to enjoy the game without any limitations? What if you want to have unlimited stars and energy to play as much as you want? What if you want to get rid of the annoying ads that interrupt your gameplay? Well, there is a way to do that. You can download and install <strong>Criminal Case Travel in Time Mod APK</strong>, a modified version of the original game that gives you access to all these features and more.</p>
- <p>In this article, we will tell you everything you need to know about Criminal Case Travel in Time Mod APK. We will explain what it is, how it works, how to download and install it, how to play it, and what are the benefits and risks of using it. We will also answer some frequently asked questions about the game and the mod apk. So, if you are ready to become a time traveling detective, read on.</p>
- <h2>What is Criminal Case Travel in Time?</h2>
- <h3>A hidden object adventure game with a time travel twist</h3>
- <p>Criminal Case Travel in Time is a game developed by Pretty Simple, a French studio that specializes in casual games. It is a hidden object adventure game, where you have to find clues and objects in various crime scenes. You can use hints, boosters, and other items to help you with your investigation. You can also play with your friends and compete with them to see who is the best detective ever.</p>
- <p>But what makes this game different from other hidden object games is that it has a time travel twist. You are not just solving crimes in the present day, but also in different historical periods. You can travel to ancient Egypt, medieval Europe, colonial America, Victorian England, and more. You can witness how history unfolds and how crimes are committed and solved in different eras.</p>
- <h3>A spin-off of the popular Criminal Case series</h3>
- <p>Criminal Case Travel in Time is a spin-off of the popular Criminal Case series, which has over 100 million players worldwide. The original Criminal Case game was launched in 2012 and became one of the most successful Facebook games ever. It has won several awards, including the Facebook Game of the Year 2013.</p>
- <p>The Criminal Case series has several spin-offs, each focusing on a different theme or location. For example, there is Criminal Case: Pacific Bay, which is set in a coastal city; Criminal Case: Mysteries of the Past, which is set in the 19th century; Criminal Case: The Conspiracy, which is set in a modern metropolis; and Criminal Case: Supernatural Investigations, which is set in a paranormal world.</p>
- <p>Criminal Case Travel in Time is the latest spin-off of the series, which was released in 2019. It is available for Android and iOS devices, as well as for PC and Mac via Facebook Gameroom.</p>
- <h3>A free-to-play game with in-app purchases</h3>
- <p>Criminal Case Travel in Time is a free-to-play game, which means you can download and play it for free. However, the game also has in-app purchases, which are optional features that you can buy with real money. For example, you can buy stars, energy, coins, cash, hints, boosters, and other items that can help you with your investigation.</p>
- <p>criminal case time travel hack apk unlimited hints and points<br />
- criminal case mod apk free download with unlimited stars and energy<br />
- criminal case travel in time cheats apk for android with unlimited resources<br />
- criminal case time travel modded apk latest version with infinite stars and energy<br />
- criminal case hack apk download for android with unlimited energy and stars<br />
- criminal case travel in time unlimited everything mod apk free<br />
- criminal case mod apk 2023 with unlimited stars and energy<br />
- criminal case time travel hack tool apk for android with unlimited resources<br />
- criminal case travel in time mod apk offline with infinite stars and energy<br />
- criminal case hack apk no root with unlimited energy and stars<br />
- criminal case travel in time premium mod apk with unlimited resources<br />
- criminal case time travel cracked apk with infinite stars and energy<br />
- criminal case mod apk unlimited money and stars and energy<br />
- criminal case travel in time hack online apk with unlimited resources<br />
- criminal case time travel unlocked mod apk with infinite stars and energy<br />
- criminal case mod apk latest version with unlimited stars and energy<br />
- criminal case travel in time generator apk for android with unlimited resources<br />
- criminal case time travel full mod apk with infinite stars and energy<br />
- criminal case mod apk android 1 with unlimited stars and energy<br />
- criminal case travel in time cheat codes apk with unlimited resources<br />
- criminal case time travel mega mod apk with infinite stars and energy<br />
- criminal case mod apk rexdl with unlimited stars and energy<br />
- criminal case travel in time glitch apk for android with unlimited resources<br />
- criminal case time travel pro mod apk with infinite stars and energy<br />
- criminal case mod apk revdl with unlimited stars and energy<br />
- criminal case travel in time trainer apk for android with unlimited resources<br />
- criminal case time travel vip mod apk with infinite stars and energy<br />
- criminal case mod apk happymod with unlimited stars and energy<br />
- criminal case travel in time patcher apk for android with unlimited resources<br />
- criminal case time travel super mod apk with infinite stars and energy</p>
- <p>Stars are the currency that you need to unlock new crime scenes, interrogate suspects, analyze evidence, and arrest the killers. You can earn stars by completing crime scenes and mini-games. Energy is the resource that you need to play crime scenes and mini-games. You can replenish energy by waiting, watching ads, or buying it with coins or cash. Coins are the currency that you can use to buy energy, hints, boosters, and other items. You can earn coins by completing cases, achievements, and daily rewards. Cash is the premium currency that you can use to buy coins, stars, energy, and other items. You can earn cash by leveling up, completing achievements, and buying it with real money.</p>
- <h2>What is Criminal Case Travel in Time Mod APK?</h2>
- <h3>A modified version of the original game</h3>
- <p>Criminal Case Travel in Time Mod APK is a modified version of the original game that has been altered by some third-party developers. Mod APK stands for modified application package, which is a file format that contains the installation and configuration data of an Android app. By modifying the original app's code, mod apk developers can add or remove features, change the graphics, unlock the content, and alter the gameplay.</p>
- <h3>A way to get unlimited stars and energy</h3>
- <p>One of the main features of Criminal Case Travel in Time Mod APK is that it gives you unlimited stars and energy. This means you can play as much as you want without having to wait or watch ads. You can also unlock all the crime scenes, interrogate all the suspects, analyze all the evidence, and arrest all the killers without any hassle. You can enjoy the game without any limitations or interruptions.</p>
- <h3>A way to enjoy the game without ads or restrictions</h3>
- <p>Another feature of Criminal Case Travel in Time Mod APK is that it removes all the ads and restrictions from the game. This means you can play without any annoying pop-ups or banners that distract you from your investigation. You can also access all the content and features of the game without having to pay for anything. You can enjoy the game without any costs or risks.</p>
- <h2>How to download and install Criminal Case Travel in Time Mod APK?</h2>
- <h3>The steps to follow</h3>
- <p>If you want to download and install Criminal Case Travel in Time Mod APK, you need to follow these steps:</p>
- <ol>
- <li>First, you need to uninstall the original game from your device if you have it installed.</li>
- <li>Second, you need to find a reliable source that provides the mod apk file. You can search online for websites or blogs that offer mod apk downloads. For example, you can visit [HappyMod](^1^), a website that claims to provide 100% working mods for various games and apps.</li>
- <li>Third, you need to download the mod apk file from the source. You need to make sure that the file is compatible with your device's specifications and operating system. You also need to check that the file is safe and virus-free.</li>
- <li>Fourth, you need to enable the installation of apps from unknown sources on your device. You can do this by going to your device's settings > security > unknown sources > allow.</li>
- <li>Fifth, you need to locate the mod apk file on your device's storage and tap on it to start the installation process. You need to follow the instructions on the screen and grant the necessary permissions for the app.</li>
- <li>Sixth, you need to wait for the installation to finish and then launch the app from your device's menu.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded and installed Criminal Case Travel in Time Mod APK on your device. You can now enjoy playing the game with unlimited stars and energy.</p> <h3>The precautions to take</h3>
- <p>While downloading and installing Criminal Case Travel in Time Mod APK may seem tempting, you need to be careful and aware of the potential risks and consequences. Here are some precautions that you need to take before and after using the mod apk:</p>
- <ul>
- <li>First, you need to make sure that you have a backup of your original game data and progress. You can do this by syncing your game account with Facebook or Google Play. This way, you can restore your game data if something goes wrong with the mod apk or if you want to switch back to the original game.</li>
- <li>Second, you need to make sure that you have a reliable antivirus or anti-malware software on your device. You need to scan the mod apk file before and after installing it to check for any viruses or malware that may harm your device or compromise your privacy.</li>
- <li>Third, you need to make sure that you use the mod apk at your own risk and discretion. You need to understand that using the mod apk may violate the terms and conditions of the original game and its developers. You may also face legal issues or penalties if you are caught using the mod apk. You may also lose your game account or access to the game if the developers detect or ban the mod apk.</li>
- </ul>
- <p>Therefore, you need to weigh the pros and cons of using Criminal Case Travel in Time Mod APK before deciding to use it. You need to be responsible and respectful of the original game and its developers.</p>
- <h3>The benefits and risks of using mod apk</h3>
- <p>As we have mentioned, using Criminal Case Travel in Time Mod APK has its benefits and risks. Here are some of them:</p>
- <table>
- <tr>
- <th>Benefits</th>
- <th>Risks</th>
- </tr>
- <tr>
- <td>You can enjoy unlimited stars and energy.</td>
- <td>You may lose your game data or progress.</td>
- </tr>
- <tr>
- <td>You can enjoy the game without ads or restrictions.</td>
- <td>You may get viruses or malware on your device.</td>
- </tr>
- <tr>
- <td>You can access all the content and features of the game.</td>
- <td>You may violate the terms and conditions of the game.</td>
- </tr>
- <tr>
- <td>You can have more fun and excitement playing the game.</td>
- <td>You may face legal issues or penalties for using the mod apk.</td>
- </tr>
- <tr>
- <td>You can compete with your friends and other players more easily.</td>
- <td>You may lose your game account or access to the game.</td>
- </tr>
- </table>
- <p>As you can see, using Criminal Case Travel in Time Mod APK has its advantages and disadvantages. You need to decide for yourself whether you want to use it or not.</p>
- <h2>How to play Criminal Case Travel in Time?</h2>
- <h3>The gameplay features and mechanics</h3>
- <p>Criminal Case Travel in Time is a game that combines hidden object, adventure, and detective genres. The gameplay features and mechanics are similar to other games in the Criminal Case series. Here are some of them:</p>
- <ul>
- <li>You can join a team of time traveling detectives who solve murder cases in different historical periods. You can meet various characters from history, such as Cleopatra, Joan of Arc, Abraham Lincoln, Jack the Ripper, and more.</li>
- <li>You can investigate crime scenes by finding clues and objects hidden in various locations. You can use hints, boosters, and other items to help you with your investigation. You can also play mini-games, such as puzzles, matching, memory, and more.</li>
- <li>You can interrogate suspects by asking them questions and observing their reactions. You can also analyze evidence by sending it to the lab or examining it yourself. You can use logic, deduction, and intuition to find the killer among the suspects.</li>
- <li>You can arrest the killer by presenting the evidence that proves their guilt. You can also choose how to deal with them, whether by sending them to jail, rehab, asylum, or death penalty. You can also earn rewards, such as coins, stars, energy, cash, clothes, accessories, pets, and more.</li>
- <li>You can travel through history by using a time machine that is powered by time crystals. You can collect time crystals by completing cases, achievements, daily rewards, and more. You can also buy time crystals with real money.</li>
- <li>You can play with your friends and other players by connecting your game account with Facebook or Google Play. You can invite your friends to join your team, send and receive gifts, chat with them, and compete with them on leaderboards and tournaments.</li>
- </ul>
- <p>Criminal Case Travel in Time is a game that offers a lot of gameplay features and mechanics that will keep you entertained and engaged for hours.</p> from you.</li>
- <li>You can have fun and excitement playing the game and traveling through history. You can see how the game graphics and sound effects change according to the historical period. You can also enjoy the game's humor and references to history and pop culture.</li>
- </ul>
- <p>Criminal Case Travel in Time is a game that offers a lot of challenges and fun of traveling through history. You can have a unique and immersive experience of being a time traveling detective.</p>
- <h2>Conclusion</h2>
- <p>Criminal Case Travel in Time is a hidden object adventure game with a time travel twist. You can join a team of time traveling detectives who solve murder cases in different historical periods. You can investigate crime scenes, interrogate suspects, analyze evidence, and catch the killers in this captivating game.</p>
- <p>If you want to enjoy the game without any limitations, you can download and install Criminal Case Travel in Time Mod APK, a modified version of the original game that gives you unlimited stars and energy. You can also enjoy the game without ads or restrictions. However, you need to be careful and aware of the potential risks and consequences of using the mod apk.</p>
- <p>If you want to play the game and travel through history, you need to follow some tips and tricks that can help you with your gameplay. You can also have fun and excitement playing the game and traveling through history. You can explore different historical periods, meet different historical figures, solve different historical crimes, and discover some secrets and surprises that history has hidden from you.</p>
- <p>Criminal Case Travel in Time is a game that combines hidden object, adventure, and detective genres. It is a game that offers a lot of gameplay features and mechanics, benefits and risks, challenges and fun. It is a game that will keep you entertained and engaged for hours.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Criminal Case Travel in Time and Criminal Case Travel in Time Mod APK:</p>
- <h3>Q: Is Criminal Case Travel in Time Mod APK safe to use?</h3>
- <p>A: Criminal Case Travel in Time Mod APK is not an official version of the game. It is a modified version that has been altered by some third-party developers. Therefore, it may not be safe to use. It may contain viruses or malware that may harm your device or compromise your privacy. It may also violate the terms and conditions of the original game and its developers. It may also cause legal issues or penalties if you are caught using it. Therefore, you need to use it at your own risk and discretion.</p>
- <h3>Q: How can I update Criminal Case Travel in Time Mod APK?</h3>
- <p>A: Criminal Case Travel in Time Mod APK may not be compatible with the latest version of the original game. It may not work properly or crash if the original game updates its features or content. Therefore, you need to update the mod apk as well. You can do this by downloading and installing the latest version of the mod apk from the same source that you used before. However, you need to make sure that the source is reliable and safe.</p>
- <h3>Q: Can I play Criminal Case Travel in Time Mod APK offline?</h3>
- <p>A: Criminal Case Travel in Time Mod APK may not work offline. It may require an internet connection to access some features or content of the game. For example, you may need an internet connection to sync your game data with Facebook or Google Play, to play with your friends or other players, to watch ads or buy items, or to travel through history. Therefore, you need to have a stable internet connection to play the mod apk.</p>
- <h3>Q: Can I play Criminal Case Travel in Time Mod APK on PC or Mac?</h3>
- <p>A: Criminal Case Travel in Time Mod APK is designed for Android devices only. It may not work on PC or Mac devices. However, you may be able to play it on PC or Mac devices by using an Android emulator. An Android emulator is a software that allows you to run Android apps on PC or Mac devices. For example, you can use [BlueStacks], a popular Android emulator for PC and Mac devices. However, you need to make sure that the emulator is compatible with your device's specifications and operating system.</p>
- <h3>Q: Where can I find more information about Criminal Case Travel in Time?</h3>
- <p>A: If you want to find more information about Criminal Case Travel in Time, you can visit the official website of the game at [https://www.criminalcase.com/]. You can also visit the official Facebook page of the game at [https://www.facebook.com/CriminalCaseTravelInTime/]. You can also join the official fan group of the game at [https://www.facebook.com/groups/CCTITfans/]. You can also watch the official YouTube channel of the game at [https://www.youtube.com/channel/UC0XZ6ZL8__oLcaZ7otz5xKA]. You can also read the official wiki of the game at [https://criminal-case-official-fanfiction.fandom.com/wiki/Criminal_Case:_Travel_in_Time]. You can also contact the official support team of the game at [https://support.prettysimplegames.com/hc/en-us/requests/new].</p>
- <p>I hope this article has helped you learn more about Criminal Case Travel in Time and Criminal Case Travel in Time Mod APK. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/FIFA World Cup 2022 in FIFA Mobile Mod APK - Enjoy Authentic Stadiums and Commentary.md DELETED
@@ -1,116 +0,0 @@
1
-
2
- <h1>APK FIFA Mobile Mod: What You Need to Know</h1>
3
- <p>If you are a fan of soccer games, you might have heard of <strong>APK FIFA Mobile Mod</strong>, a modified version of the popular <strong>FIFA Mobile</strong> game by EA Sports. This mod gives you access to some amazing features and benefits that can enhance your gaming experience and make you feel like a soccer legend. In this article, we will tell you what APK FIFA Mobile Mod is, what it can do for you, and how you can download and install it on your device.</p>
4
- <h2>apk fifa mobile mod</h2><br /><p><b><b>DOWNLOAD</b> &#127383; <a href="https://jinyurl.com/2uNSGb">https://jinyurl.com/2uNSGb</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p><strong>FIFA Mobile</strong> is one of the most popular soccer games on mobile devices, with over 100 million downloads on Google Play. It lets you build your ultimate team of soccer stars from over 15,000 authentic players, 600+ teams, and 30+ leagues. You can also compete against other players in various modes, including Head-to-Head, VS Attack, Manager Mode, and the exclusive <strong>FIFA World Cup 2022</strong> mode.</p>
7
- <p>However, if you want to unlock all the features and content of the game, you might need to spend a lot of time and money on it. That's where <strong>APK FIFA Mobile Mod</strong> comes in handy. This mod is a modified version of the original game that gives you some advantages that can make your gaming experience more enjoyable and rewarding. Some of these advantages are:</p>
8
- <ul>
9
- <li><strong>Unlocked All</strong>: You can access all the players, teams, kits, stadiums, and modes in the game without any restrictions.</li>
10
- <li><strong>Unlimited Money</strong>: You can get unlimited coins and gems to spend on upgrades, transfers, and packs.</li>
11
- <li><strong>Menu Mod</strong>: You can access a menu mod that gives you more control and customization over the game.</li>
12
- </ul>
13
- <p>To use APK FIFA Mobile Mod, you need to download and install it on your device. Don't worry, we will show you how to do that in the next section.</p>
14
- <h2>Features and Benefits of APK FIFA Mobile Mod</h2>
15
- <p>Now that you know what APK FIFA Mobile Mod is, let's take a closer look at its features and benefits. Here are some of the things that this mod can do for you:</p> <h3>Unlocked All</h3>
16
- <p>One of the most appealing features of APK FIFA Mobile Mod is that it unlocks all the players, teams, kits, stadiums, and modes in the game. This means that you can choose from any of the 15,000+ players, 600+ teams, and 30+ leagues in the game, without having to unlock them by playing or paying. You can also use any of the kits and stadiums that are available in the game, including the exclusive ones that are only accessible through special events or packs. Moreover, you can play any of the modes in the game, including the FIFA World Cup 2022 mode, which lets you experience the thrill of the biggest soccer tournament in the world.</p>
17
- <p>By unlocking all the features and content of the game, APK FIFA Mobile Mod gives you more options and flexibility to build your ultimate team and play the game as you like. You can create your dream team with your favorite players, customize their appearance and skills, and challenge other players in various modes. You can also enjoy the FIFA World Cup 2022 mode, which lets you choose your national team, qualify for the tournament, and compete for the trophy.</p>
18
- <h3>Unlimited Money</h3>
19
- <p>Another feature of APK FIFA Mobile Mod is that it gives you unlimited coins and gems to spend on upgrades, transfers, and packs. Coins and gems are the main currencies in the game, which you can use to improve your team's performance and unlock more rewards. However, earning coins and gems in the game can be time-consuming and costly, as you need to play matches, complete tasks, or buy them with real money.</p>
20
- <p>apk fifa mobile mod menu<br />
21
- apk fifa mobile mod unlimited money<br />
22
- apk fifa mobile mod unlocked all<br />
23
- apk fifa mobile mod speed<br />
24
- apk fifa mobile mod perfect skill<br />
25
- apk fifa mobile mod freeze players<br />
26
- apk fifa mobile mod freeze goalkeeper<br />
27
- apk fifa mobile mod world cup 2022<br />
28
- apk fifa mobile mod icons and heroes<br />
29
- apk fifa mobile mod manager mode<br />
30
- apk fifa mobile mod download<br />
31
- apk fifa mobile mod latest version<br />
32
- apk fifa mobile mod offline<br />
33
- apk fifa mobile mod no root<br />
34
- apk fifa mobile mod anti ban<br />
35
- apk fifa mobile mod hack<br />
36
- apk fifa mobile mod cheat<br />
37
- apk fifa mobile mod free shopping<br />
38
- apk fifa mobile mod mega<br />
39
- apk fifa mobile mod mediafire<br />
40
- apk fifa mobile mod 5play<br />
41
- apk fifa mobile mod happymod<br />
42
- apk fifa mobile mod rexdl<br />
43
- apk fifa mobile mod revdl<br />
44
- apk fifa mobile mod android 1<br />
45
- apk fifa mobile mod android 11<br />
46
- apk fifa mobile mod android 10<br />
47
- apk fifa mobile mod android 9<br />
48
- apk fifa mobile mod android 8<br />
49
- apk fifa mobile mod android 7<br />
50
- apk fifa mobile mod android 6<br />
51
- apk fifa mobile mod android 5<br />
52
- apk fifa mobile mod ios<br />
53
- apk fifa mobile mod iphone<br />
54
- apk fifa mobile mod ipad<br />
55
- apk fifa mobile mod ipod touch<br />
56
- apk fifa mobile mod windows 10<br />
57
- apk fifa mobile mod windows phone<br />
58
- apk fifa mobile mod pc<br />
59
- apk fifa mobile mod laptop<br />
60
- apk fifa mobile mod macbook<br />
61
- apk fifa mobile mod chromebook<br />
62
- apk fifa mobile mod bluestacks<br />
63
- apk fifa mobile mod nox player<br />
64
- apk fifa mobile mod ld player<br />
65
- apk fifa mobile mod memu player<br />
66
- apk fifa mobile mod game guardian<br />
67
- apk fifa mobile mod lucky patcher</p>
68
- <p>With APK FIFA Mobile Mod, you don't have to worry about running out of coins and gems. You can get as many coins and gems as you want, without having to spend any money or effort. You can use them to buy players from the transfer market, upgrade your team's attributes and chemistry, and open packs that contain rare and exclusive items. You can also use them to buy stamina, energy, and other resources that can help you play more matches and earn more rewards.</p>
69
- <h3>Menu Mod</h3>
70
- <p>The last feature of APK FIFA Mobile Mod is that it lets you access a menu mod that gives you more control and customization over the game. The menu mod is a hidden feature that you can activate by tapping on a button on the screen. The menu mod allows you to adjust some settings and parameters of the game, such as:</p>
71
- <ul>
72
- <li><strong>Game Speed</strong>: You can change the speed of the game from normal to fast or slow.</li>
73
- <li><strong>Freeze Players</strong>: You can freeze your opponents' players or goalkeepers, making them unable to move or react.</li>
74
- <li><strong>Perfect Skills</strong>: You can enable perfect skills for your players, making them perform flawless dribbles, passes, shots, tackles, etc.</li>
75
- <li><strong>And More</strong>: You can also enable other features such as unlimited stamina, no ads, no root required, etc.</li>
76
- </ul>
77
- <p>The menu mod gives you more control over the game and more ways to have fun with it. You can use it to experiment with different settings and scenarios, or to make the game easier or harder for yourself. You can also use it to prank your friends or opponents by freezing their players or making them miss their shots.</p>
78
- <h2>How to Download and Install APK FIFA Mobile Mod</h2>
79
- <p>If you are interested in trying APK FIFA Mobile Mod, you need to download and install it on your device. Here are some of the requirements and steps that you need to follow:</p>
80
- <h3>Requirements</h3>
81
- <p>Before you download and install APK FIFA Mobile Mod, you need to make sure that your device meets some minimum requirements. These are:</p>
82
- <ul>
83
- <li><strong>Android Version</strong>: Your device must have Android 4.4 or higher.</li>
84
- <li><strong>Storage Space</strong>: Your device must have at least 100 MB of free storage space.</li>
85
- <li><strong>Internet Connection</strong>: Your device must have a stable internet connection.</li>
86
- <li><strong>Risks and Precautions</strong>: You also need to be aware of some risks of using APK FIFA Mobile Mod and take some precautions. These are:
87
- <ul>
88
- <li><strong>Risks</strong>: APK FIFA Mobile Mod is not an official version of FIFA Mobile. It is a modified version that may contain viruses or malware that can harm your device or data. It may also violate some terms and conditions of EA Sports or Google Play. It may also not work properly with some devices or updates.</li>
89
- <li><strong>Precautions</strong>: To avoid or minimize these risks, you should only download APK FIFA Mobile Mod from a reliable source that has positive feedback and reviews. You should also scan the file with an antivirus program before installing it. You should also backup your device data and uninstall the original FIFA Mobile game before installing APK FIFA Mobile Mod. You should also use APK FIFA Mobile Mod at your own risk and discretion, as we are not responsible for any damages or consequences that may arise from using it.</li>
90
- </ul></li>
91
- </ul>
92
- <h3>Steps</h3>
93
- <p>Once you have checked the requirements and taken the precautions, you can follow these steps to download and install APK FIFA Mobile Mod on your device:</p>
94
- <ol>
95
- <li><strong>Find a reliable source</strong>: You need to find a website or a platform that offers APK FIFA Mobile Mod for download. You can search for it on Google or use one of the links that we have provided below. Make sure that the source is trustworthy and has positive feedback and reviews from other users.</li>
96
- <li><strong>Enable unknown sources</strong>: You need to enable unknown sources on your device settings. This will allow you to install apps that are not from Google Play. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
97
- <li><strong>Install APK FIFA Mobile Mod</strong>: You need to download the APK file of APK FIFA Mobile Mod from the source that you have chosen. Once the download is complete, locate the file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. As a basic integrity check, you can verify the file's checksum first; see the sketch after this list.</li>
98
- <li><strong>Launch and enjoy APK FIFA Mobile Mod</strong>: You need to launch APK FIFA Mobile Mod on your device and grant it the necessary permissions. You can then enjoy all the features and benefits of APK FIFA Mobile Mod and play the game as you like.</li>
99
- </ol>
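<p>As a basic integrity check before installing, you can compare the downloaded file's SHA-256 checksum against the one published by your source, if it provides one. The sketch below is illustrative only; the file name and expected hash are placeholders, not real values:</p>
<pre><code># Illustrative sketch: verify a downloaded APK's SHA-256 checksum.
# The file name and EXPECTED hash are placeholders.
import hashlib

EXPECTED = "publisher-provided-sha256-hash-goes-here"

digest = hashlib.sha256()
with open("fifa_mobile_mod.apk", "rb") as f:
    for chunk in iter(lambda: f.read(8192), b""):
        digest.update(chunk)

if digest.hexdigest() == EXPECTED:
    print("Checksum matches; the file was not corrupted in transit.")
else:
    print("Checksum mismatch; do not install this file.")
</code></pre>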
100
- <h2>Conclusion</h2>
101
- <p>In conclusion, APK FIFA Mobile Mod is a modified version of FIFA Mobile that gives you some amazing features and benefits that can make your gaming experience more enjoyable and rewarding. You can unlock all the players, teams, kits, stadiums, and modes in the game, get unlimited coins and gems to spend on upgrades, transfers, and packs, and access a menu mod that gives you more control and customization over the game. To use APK FIFA Mobile Mod, you need to download and install it on your device, following some requirements and steps that we have explained in this article.</p>
102
- <p>If you are a fan of soccer games, you should definitely give APK FIFA Mobile Mod a try. It can make you feel like a soccer legend and have more fun with FIFA Mobile. However, you should also be aware of the risks of using APK FIFA Mobile Mod and take the necessary precautions, as it is not an official version of the game and may contain viruses or malware that can harm your device or data. Use APK FIFA Mobile Mod at your own risk and discretion, as we are not responsible for any damages or consequences that may arise from using it.</p>
103
- <p>We hope that this article has been helpful and informative for you. If you have any questions or feedback about APK FIFA Mobile Mod, feel free to leave a comment below or contact us through our website. We would love to hear from you and help you with any issues or concerns that you may have. Thank you for reading and happy gaming!</p>
104
- <h2>FAQs</h2>
105
- <h3>Is APK FIFA Mobile Mod safe to use?</h3>
106
- <p>APK FIFA Mobile Mod is not an official version of FIFA Mobile. It is a modified version that may contain viruses or malware that can harm your device or data. It may also violate some terms and conditions of EA Sports or Google Play. Therefore, it is not completely safe to use APK FIFA Mobile Mod. However, you can minimize the risks by downloading APK FIFA Mobile Mod from a reliable source, scanning it with an antivirus program, backing up your device data, uninstalling the original game, and using it at your own risk and discretion.</p>
107
- <h3>Is APK FIFA Mobile Mod compatible with the latest version of FIFA Mobile?</h3>
108
- <p>APK FIFA Mobile Mod is usually updated to match the latest version of FIFA Mobile. However, there may be some delays or glitches in the updates due to technical issues or changes in the original game. Therefore, it is possible that APK FIFA Mobile Mod may not work properly with the latest version of FIFA Mobile at some point. In that case, you can wait for a new update of APK FIFA Mobile Mod or revert to an older version of FIFA Mobile.</p>
109
- <h3>Can I play online with APK FIFA Mobile Mod?</h3>
110
- <p>APK FIFA Mobile Mod allows you to play online with other players in various modes, such as Head-to-Head, VS Attack, Manager Mode, and FIFA World Cup 2022 mode. However, you may face some problems or limitations when playing online with APK FIFA Mobile Mod. For example, you may not be able to connect to some servers or matches, you may encounter some errors or bugs, you may face some unfair or unbalanced opponents, or you may get banned or suspended by EA Sports for using a modded version of the game. Therefore, you should be careful and cautious when playing online with APK FIFA Mobile Mod.</p>
111
- <h3>Will I get banned for using APK FIFA Mobile Mod?</h3>
112
- <p>APK FIFA Mobile Mod is not an official version of FIFA Mobile. It is a modified version that may violate some terms and conditions of EA Sports or Google Play. Therefore, it is possible that you may get banned or suspended by EA Sports for using APK FIFA Mobile Mod. This may happen if EA Sports detects that you are using a modded version of the game, if you abuse or exploit some features of the mod, or if you receive too many reports or complaints from other players. Therefore, you should use APK FIFA Mobile Mod at your own risk and discretion, and avoid doing anything that may get you banned or suspended.</p>
113
- <h3>How can I update APK FIFA Mobile Mod?</h3>
114
- <p>APK FIFA Mobile Mod is usually updated to match the latest version of FIFA Mobile. However, there may be some delays or glitches in the updates due to technical issues or changes in the original game. Therefore, you need to check regularly for new updates of APK FIFA Mobile Mod from the source that you have downloaded it from. You can also follow some social media pages or groups that provide updates and news about APK FIFA Mobile Mod. Once you find a new update of APK FIFA Mobile Mod, you need to download and install it on your device, following the same steps that we have explained in this article.</p>
 
spaces/232labs/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp DELETED
@@ -1,31 +0,0 @@
1
- #include <ATen/ATen.h>
2
- #include <torch/extension.h>
3
-
4
- torch::Tensor upfirdn2d_op(const torch::Tensor &input,
5
- const torch::Tensor &kernel, int up_x, int up_y,
6
- int down_x, int down_y, int pad_x0, int pad_x1,
7
- int pad_y0, int pad_y1);
8
-
9
- #define CHECK_CUDA(x) \
10
- TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
11
- #define CHECK_CONTIGUOUS(x) \
12
- TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
13
- #define CHECK_INPUT(x) \
14
- CHECK_CUDA(x); \
15
- CHECK_CONTIGUOUS(x)
16
-
17
- torch::Tensor upfirdn2d(const torch::Tensor &input, const torch::Tensor &kernel,
18
- int up_x, int up_y, int down_x, int down_y, int pad_x0,
19
- int pad_x1, int pad_y0, int pad_y1) {
20
- CHECK_INPUT(input);
21
- CHECK_INPUT(kernel);
22
-
23
- at::DeviceGuard guard(input.device());
24
-
25
- return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1,
26
- pad_y0, pad_y1);
27
- }
28
-
29
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
30
- m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
31
- }
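For context, a binding like the one above is typically JIT-compiled and called from Python with torch.utils.cpp_extension. The sketch below is illustrative: the companion CUDA source name (upfirdn2d_kernel.cu) and the tensor shapes are assumptions, and in the StyleGAN2 codebase a Python wrapper normally prepares these arguments.

import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension from its sources (the .cu filename is assumed).
upfirdn2d_ext = load(name="upfirdn2d",
                     sources=["upfirdn2d.cpp", "upfirdn2d_kernel.cu"])

x = torch.randn(16, 8, 8, 1, device="cuda").contiguous()  # must be CUDA + contiguous
k = torch.ones(4, 4, device="cuda") / 16.0                # small FIR kernel
# Arguments: input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
y = upfirdn2d_ext.upfirdn2d(x, k, 2, 2, 1, 1, 1, 1, 1, 1)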
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/timm_model.py DELETED
@@ -1,106 +0,0 @@
1
- """ timm model adapter
2
-
3
- Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
4
- """
5
- from collections import OrderedDict
6
-
7
- import torch.nn as nn
8
-
9
- try:
10
- import timm
11
- from timm.models.layers import Mlp, to_2tuple
12
- from timm.models.layers.attention_pool2d import RotAttentionPool2d
13
- from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
14
- except ImportError as e:
15
- timm = None
16
-
17
- from .utils import freeze_batch_norm_2d
18
-
19
-
20
- class TimmModel(nn.Module):
21
- """ timm model adapter
22
- # FIXME this adapter is a work in progress, may change in ways that break weight compat
23
- """
24
-
25
- def __init__(
26
- self,
27
- model_name,
28
- embed_dim,
29
- image_size=224,
30
- pool='avg',
31
- proj='linear',
32
- drop=0.,
33
- pretrained=False):
34
- super().__init__()
35
- if timm is None:
36
- raise RuntimeError("Please `pip install timm` to use timm models.")
37
-
38
- self.image_size = to_2tuple(image_size)
39
- self.trunk = timm.create_model(model_name, pretrained=pretrained)
40
- feat_size = self.trunk.default_cfg.get('pool_size', None)
41
- feature_ndim = 1 if not feat_size else 2
42
- if pool in ('abs_attn', 'rot_attn'):
43
- assert feature_ndim == 2
44
- # if attn pooling used, remove both classifier and default pool
45
- self.trunk.reset_classifier(0, global_pool='')
46
- else:
47
- # reset global pool if pool config set, otherwise leave as network default
48
- reset_kwargs = dict(global_pool=pool) if pool else {}
49
- self.trunk.reset_classifier(0, **reset_kwargs)
50
- prev_chs = self.trunk.num_features
51
-
52
- head_layers = OrderedDict()
53
- if pool == 'abs_attn':
54
- head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
55
- prev_chs = embed_dim
56
- elif pool == 'rot_attn':
57
- head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
58
- prev_chs = embed_dim
59
- else:
60
- assert proj, 'projection layer needed if non-attention pooling is used.'
61
-
62
- # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
63
- if proj == 'linear':
64
- head_layers['drop'] = nn.Dropout(drop)
65
- head_layers['proj'] = nn.Linear(prev_chs, embed_dim)
66
- elif proj == 'mlp':
67
- head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
68
-
69
- self.head = nn.Sequential(head_layers)
70
-
71
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
72
- """ lock modules
73
- Args:
74
- unlocked_groups (int): leave last n layer groups unlocked (default: 0)
75
- """
76
- if not unlocked_groups:
77
- # lock full model
78
- for param in self.trunk.parameters():
79
- param.requires_grad = False
80
- if freeze_bn_stats:
81
- freeze_batch_norm_2d(self.trunk)
82
- else:
83
- # NOTE: partial freeze requires latest timm (master) branch and is subject to change
84
- try:
85
- # FIXME import here until API stable and in an official release
86
- from timm.models.helpers import group_parameters, group_modules
87
- except ImportError:
88
- raise RuntimeError(
89
- 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
90
- matcher = self.trunk.group_matcher()
91
- gparams = group_parameters(self.trunk, matcher)
92
- max_layer_id = max(gparams.keys())
93
- max_layer_id = max_layer_id - unlocked_groups
94
- for group_idx in range(max_layer_id + 1):
95
- group = gparams[group_idx]
96
- for param in group:
97
- self.trunk.get_parameter(param).requires_grad = False
98
- if freeze_bn_stats:
99
- gmodules = group_modules(self.trunk, matcher, reverse=True)
100
- gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
101
- freeze_batch_norm_2d(self.trunk, gmodules)
102
-
103
- def forward(self, x):
104
- x = self.trunk(x)
105
- x = self.head(x)
106
- return x
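A brief usage sketch of the adapter above; the model name and sizes are illustrative, and it assumes timm is installed.

import torch

# Illustrative: wrap a timm backbone as a 512-d embedding tower.
model = TimmModel('resnet50', embed_dim=512, image_size=224,
                  pool='avg', proj='linear', pretrained=False)
images = torch.randn(2, 3, 224, 224)
embeddings = model(images)   # -> shape (2, 512)

# Freeze the whole trunk (optionally also BatchNorm running stats):
model.lock(unlocked_groups=0, freeze_bn_stats=True)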
 
spaces/AIGuardians/SummarizeWikipediaDocument/tester.py DELETED
@@ -1,21 +0,0 @@
1
- import wikipedia
2
-
3
- def search_wiki(text):
4
- article_list = wikipedia.search(text)
5
- return wikipedia.page(article_list[0])
6
-
7
-
8
- def get_wiki(search_term):
9
- return wikipedia.page(search_term)
10
-
11
-
12
-
13
- # src = search_wiki('spacex')
14
- get = get_wiki('spacex')
15
- # print(src)
16
- print(get)
17
- print(wikipedia.summary("Python Programming Language"))
18
- x = search_wiki('spacex')
19
-
20
- print('done')
21
-
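The script above will raise on ambiguous or missing titles; below is a slightly more defensive sketch, using exception classes that ship with the same wikipedia package.

import wikipedia

def safe_summary(term, sentences=2):
    # Return a short summary, falling back gracefully on ambiguous/missing pages.
    try:
        return wikipedia.summary(term, sentences=sentences)
    except wikipedia.exceptions.DisambiguationError as e:
        # Fall back to the first disambiguation candidate.
        return wikipedia.summary(e.options[0], sentences=sentences)
    except wikipedia.exceptions.PageError:
        return None

print(safe_summary('spacex'))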
 
spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/openaimodel.py DELETED
@@ -1,798 +0,0 @@
1
- from abc import abstractmethod
2
- import math
3
- import torch
4
-
5
- import numpy as np
6
- import torch as th
7
- import torch.nn as nn
8
- import torch.nn.functional as F
9
-
10
- from ldm.modules.diffusionmodules.util import (
11
- checkpoint,
12
- conv_nd,
13
- linear,
14
- avg_pool_nd,
15
- zero_module,
16
- normalization,
17
- timestep_embedding,
18
- )
19
- from ldm.modules.attention import SpatialTransformer
20
- from ldm.util import exists
21
-
22
-
23
- # dummy replace
24
- def convert_module_to_f16(x):
25
- pass
26
-
27
- def convert_module_to_f32(x):
28
- pass
29
-
30
-
31
- ## go
32
- class AttentionPool2d(nn.Module):
33
- """
34
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
35
- """
36
-
37
- def __init__(
38
- self,
39
- spacial_dim: int,
40
- embed_dim: int,
41
- num_heads_channels: int,
42
- output_dim: int = None,
43
- ):
44
- super().__init__()
45
- self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
46
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
47
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
48
- self.num_heads = embed_dim // num_heads_channels
49
- self.attention = QKVAttention(self.num_heads)
50
-
51
- def forward(self, x):
52
- b, c, *_spatial = x.shape
53
- x = x.reshape(b, c, -1) # NC(HW)
54
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
55
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
56
- x = self.qkv_proj(x)
57
- x = self.attention(x)
58
- x = self.c_proj(x)
59
- return x[:, :, 0]
60
-
61
-
62
- class TimestepBlock(nn.Module):
63
- """
64
- Any module where forward() takes timestep embeddings as a second argument.
65
- """
66
-
67
- @abstractmethod
68
- def forward(self, x, emb):
69
- """
70
- Apply the module to `x` given `emb` timestep embeddings.
71
- """
72
-
73
-
74
- class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
75
- """
76
- A sequential module that passes timestep embeddings to the children that
77
- support it as an extra input.
78
- """
79
-
80
- def forward(self, x, emb, context=None):
81
- for layer in self:
82
- if isinstance(layer, TimestepBlock):
83
- x = layer(x, emb)
84
- elif isinstance(layer, SpatialTransformer):
85
- x = layer(x, context)
86
- else:
87
- x = layer(x)
88
- return x
89
-
90
-
91
- class Upsample(nn.Module):
92
- """
93
- An upsampling layer with an optional convolution.
94
- :param channels: channels in the inputs and outputs.
95
- :param use_conv: a bool determining if a convolution is applied.
96
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
97
- upsampling occurs in the inner-two dimensions.
98
- """
99
-
100
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
101
- super().__init__()
102
- self.channels = channels
103
- self.out_channels = out_channels or channels
104
- self.use_conv = use_conv
105
- self.dims = dims
106
- if use_conv:
107
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
108
-
109
- def forward(self, x):
110
- assert x.shape[1] == self.channels
111
- if self.dims == 3:
112
- x = F.interpolate(
113
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
114
- )
115
- else:
116
- x = F.interpolate(x, scale_factor=2, mode="nearest")
117
- if self.use_conv:
118
- x = self.conv(x)
119
- return x
120
-
121
- class TransposedUpsample(nn.Module):
122
- 'Learned 2x upsampling without padding'
123
- def __init__(self, channels, out_channels=None, ks=5):
124
- super().__init__()
125
- self.channels = channels
126
- self.out_channels = out_channels or channels
127
-
128
- self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
129
-
130
- def forward(self,x):
131
- return self.up(x)
132
-
133
-
134
- class Downsample(nn.Module):
135
- """
136
- A downsampling layer with an optional convolution.
137
- :param channels: channels in the inputs and outputs.
138
- :param use_conv: a bool determining if a convolution is applied.
139
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
140
- downsampling occurs in the inner-two dimensions.
141
- """
142
-
143
- def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
144
- super().__init__()
145
- self.channels = channels
146
- self.out_channels = out_channels or channels
147
- self.use_conv = use_conv
148
- self.dims = dims
149
- stride = 2 if dims != 3 else (1, 2, 2)
150
- if use_conv:
151
- self.op = conv_nd(
152
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
153
- )
154
- else:
155
- assert self.channels == self.out_channels
156
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
157
-
158
- def forward(self, x):
159
- assert x.shape[1] == self.channels
160
- return self.op(x)
161
-
162
-
163
- class ResBlock(TimestepBlock):
164
- """
165
- A residual block that can optionally change the number of channels.
166
- :param channels: the number of input channels.
167
- :param emb_channels: the number of timestep embedding channels.
168
- :param dropout: the rate of dropout.
169
- :param out_channels: if specified, the number of out channels.
170
- :param use_conv: if True and out_channels is specified, use a spatial
171
- convolution instead of a smaller 1x1 convolution to change the
172
- channels in the skip connection.
173
- :param dims: determines if the signal is 1D, 2D, or 3D.
174
- :param use_checkpoint: if True, use gradient checkpointing on this module.
175
- :param up: if True, use this block for upsampling.
176
- :param down: if True, use this block for downsampling.
177
- """
178
-
179
- def __init__(
180
- self,
181
- channels,
182
- emb_channels,
183
- dropout,
184
- out_channels=None,
185
- use_conv=False,
186
- use_scale_shift_norm=False,
187
- dims=2,
188
- use_checkpoint=False,
189
- up=False,
190
- down=False,
191
- ):
192
- super().__init__()
193
- self.channels = channels
194
- self.emb_channels = emb_channels
195
- self.dropout = dropout
196
- self.out_channels = out_channels or channels
197
- self.use_conv = use_conv
198
- self.use_checkpoint = use_checkpoint
199
- self.use_scale_shift_norm = use_scale_shift_norm
200
-
201
- self.in_layers = nn.Sequential(
202
- normalization(channels),
203
- nn.SiLU(),
204
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
205
- )
206
-
207
- self.updown = up or down
208
-
209
- if up:
210
- self.h_upd = Upsample(channels, False, dims)
211
- self.x_upd = Upsample(channels, False, dims)
212
- elif down:
213
- self.h_upd = Downsample(channels, False, dims)
214
- self.x_upd = Downsample(channels, False, dims)
215
- else:
216
- self.h_upd = self.x_upd = nn.Identity()
217
-
218
- self.emb_layers = nn.Sequential(
219
- nn.SiLU(),
220
- linear(
221
- emb_channels,
222
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
223
- ),
224
- )
225
- self.out_layers = nn.Sequential(
226
- normalization(self.out_channels),
227
- nn.SiLU(),
228
- nn.Dropout(p=dropout),
229
- zero_module(
230
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
231
- ),
232
- )
233
-
234
- if self.out_channels == channels:
235
- self.skip_connection = nn.Identity()
236
- elif use_conv:
237
- self.skip_connection = conv_nd(
238
- dims, channels, self.out_channels, 3, padding=1
239
- )
240
- else:
241
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
242
-
243
- def forward(self, x, emb):
244
- """
245
- Apply the block to a Tensor, conditioned on a timestep embedding.
246
- :param x: an [N x C x ...] Tensor of features.
247
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
248
- :return: an [N x C x ...] Tensor of outputs.
249
- """
250
- return checkpoint(
251
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
252
- )
253
-
254
-
255
- def _forward(self, x, emb):
256
- if self.updown:
257
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
258
- h = in_rest(x)
259
- h = self.h_upd(h)
260
- x = self.x_upd(x)
261
- h = in_conv(h)
262
- else:
263
- h = self.in_layers(x)
264
- emb_out = self.emb_layers(emb).type(h.dtype)
265
- while len(emb_out.shape) < len(h.shape):
266
- emb_out = emb_out[..., None]
267
- if self.use_scale_shift_norm:
268
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
269
- scale, shift = th.chunk(emb_out, 2, dim=1)
270
- h = out_norm(h) * (1 + scale) + shift
271
- h = out_rest(h)
272
- else:
273
- h = h + emb_out
274
- h = self.out_layers(h)
275
- return self.skip_connection(x) + h
276
-
277
-
278
- class AttentionBlock(nn.Module):
279
- """
280
- An attention block that allows spatial positions to attend to each other.
281
- Originally ported from here, but adapted to the N-d case.
282
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
283
- """
284
-
285
- def __init__(
286
- self,
287
- channels,
288
- num_heads=1,
289
- num_head_channels=-1,
290
- use_checkpoint=False,
291
- use_new_attention_order=False,
292
- ):
293
- super().__init__()
294
- self.channels = channels
295
- if num_head_channels == -1:
296
- self.num_heads = num_heads
297
- else:
298
- assert (
299
- channels % num_head_channels == 0
300
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
301
- self.num_heads = channels // num_head_channels
302
- self.use_checkpoint = use_checkpoint
303
- self.norm = normalization(channels)
304
- self.qkv = conv_nd(1, channels, channels * 3, 1)
305
- if use_new_attention_order:
306
- # split qkv before split heads
307
- self.attention = QKVAttention(self.num_heads)
308
- else:
309
- # split heads before split qkv
310
- self.attention = QKVAttentionLegacy(self.num_heads)
311
-
312
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
313
-
314
- def forward(self, x):
315
- return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
316
- #return pt_checkpoint(self._forward, x) # pytorch
317
-
318
- def _forward(self, x):
319
- b, c, *spatial = x.shape
320
- x = x.reshape(b, c, -1)
321
- qkv = self.qkv(self.norm(x))
322
- h = self.attention(qkv)
323
- h = self.proj_out(h)
324
- return (x + h).reshape(b, c, *spatial)
325
-
326
-
327
- def count_flops_attn(model, _x, y):
328
- """
329
- A counter for the `thop` package to count the operations in an
330
- attention operation.
331
- Meant to be used like:
332
- macs, params = thop.profile(
333
- model,
334
- inputs=(inputs, timestamps),
335
- custom_ops={QKVAttention: QKVAttention.count_flops},
336
- )
337
- """
338
- b, c, *spatial = y[0].shape
339
- num_spatial = int(np.prod(spatial))
340
- # We perform two matmuls with the same number of ops.
341
- # The first computes the weight matrix, the second computes
342
- # the combination of the value vectors.
343
- matmul_ops = 2 * b * (num_spatial ** 2) * c
344
- model.total_ops += th.DoubleTensor([matmul_ops])
345
-
346
-
347
- class QKVAttentionLegacy(nn.Module):
348
- """
349
- A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
350
- """
351
-
352
- def __init__(self, n_heads):
353
- super().__init__()
354
- self.n_heads = n_heads
355
-
356
- def forward(self, qkv):
357
- """
358
- Apply QKV attention.
359
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
360
- :return: an [N x (H * C) x T] tensor after attention.
361
- """
362
- bs, width, length = qkv.shape
363
- assert width % (3 * self.n_heads) == 0
364
- ch = width // (3 * self.n_heads)
365
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
366
- scale = 1 / math.sqrt(math.sqrt(ch))
367
- weight = th.einsum(
368
- "bct,bcs->bts", q * scale, k * scale
369
- ) # More stable with f16 than dividing afterwards
370
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
371
- a = th.einsum("bts,bcs->bct", weight, v)
372
- return a.reshape(bs, -1, length)
373
-
374
- @staticmethod
375
- def count_flops(model, _x, y):
376
- return count_flops_attn(model, _x, y)
377
-
378
-
379
- class QKVAttention(nn.Module):
380
- """
381
- A module which performs QKV attention and splits in a different order.
382
- """
383
-
384
- def __init__(self, n_heads):
385
- super().__init__()
386
- self.n_heads = n_heads
387
-
388
- def forward(self, qkv):
389
- """
390
- Apply QKV attention.
391
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
392
- :return: an [N x (H * C) x T] tensor after attention.
393
- """
394
- bs, width, length = qkv.shape
395
- assert width % (3 * self.n_heads) == 0
396
- ch = width // (3 * self.n_heads)
397
- q, k, v = qkv.chunk(3, dim=1)
398
- scale = 1 / math.sqrt(math.sqrt(ch))
399
- weight = th.einsum(
400
- "bct,bcs->bts",
401
- (q * scale).view(bs * self.n_heads, ch, length),
402
- (k * scale).view(bs * self.n_heads, ch, length),
403
- ) # More stable with f16 than dividing afterwards
404
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
405
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
406
- return a.reshape(bs, -1, length)
407
-
408
- @staticmethod
409
- def count_flops(model, _x, y):
410
- return count_flops_attn(model, _x, y)
411
-
412
-
413
- class UNetModel(nn.Module):
414
- """
415
- The full UNet model with attention and timestep embedding.
416
- :param in_channels: channels in the input Tensor.
417
- :param model_channels: base channel count for the model.
418
- :param out_channels: channels in the output Tensor.
419
- :param num_res_blocks: number of residual blocks per downsample.
420
- :param attention_resolutions: a collection of downsample rates at which
421
- attention will take place. May be a set, list, or tuple.
422
- For example, if this contains 4, then at 4x downsampling, attention
423
- will be used.
424
- :param dropout: the dropout probability.
425
- :param channel_mult: channel multiplier for each level of the UNet.
426
- :param conv_resample: if True, use learned convolutions for upsampling and
427
- downsampling.
428
- :param dims: determines if the signal is 1D, 2D, or 3D.
429
- :param num_classes: if specified (as an int), then this model will be
430
- class-conditional with `num_classes` classes.
431
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
432
- :param num_heads: the number of attention heads in each attention layer.
433
- :param num_heads_channels: if specified, ignore num_heads and instead use
434
- a fixed channel width per attention head.
435
- :param num_heads_upsample: works with num_heads to set a different number
436
- of heads for upsampling. Deprecated.
437
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
438
- :param resblock_updown: use residual blocks for up/downsampling.
439
- :param use_new_attention_order: use a different attention pattern for potentially
440
- increased efficiency.
441
- """
442
-
443
- def __init__(
444
- self,
445
- image_size,
446
- in_channels,
447
- model_channels,
448
- out_channels,
449
- num_res_blocks,
450
- attention_resolutions,
451
- dropout=0,
452
- channel_mult=(1, 2, 4, 8),
453
- conv_resample=True,
454
- dims=2,
455
- num_classes=None,
456
- use_checkpoint=False,
457
- use_fp16=False,
458
- num_heads=-1,
459
- num_head_channels=-1,
460
- num_heads_upsample=-1,
461
- use_scale_shift_norm=False,
462
- resblock_updown=False,
463
- use_new_attention_order=False,
464
- use_spatial_transformer=False, # custom transformer support
465
- transformer_depth=1, # custom transformer support
466
- context_dim=None, # custom transformer support
467
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
468
- legacy=True,
469
- disable_self_attentions=None,
470
- num_attention_blocks=None,
471
- disable_middle_self_attn=False,
472
- use_linear_in_transformer=False,
473
- ):
474
- super().__init__()
475
- if use_spatial_transformer:
476
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
477
-
478
- if context_dim is not None:
479
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
480
- from omegaconf.listconfig import ListConfig
481
- if type(context_dim) == ListConfig:
482
- context_dim = list(context_dim)
483
-
484
- if num_heads_upsample == -1:
485
- num_heads_upsample = num_heads
486
-
487
- if num_heads == -1:
488
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
489
-
490
- if num_head_channels == -1:
491
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
492
-
493
- self.image_size = image_size
494
- self.in_channels = in_channels
495
- self.model_channels = model_channels
496
- self.out_channels = out_channels
497
- if isinstance(num_res_blocks, int):
498
- self.num_res_blocks = len(channel_mult) * [num_res_blocks]
499
- else:
500
- if len(num_res_blocks) != len(channel_mult):
501
- raise ValueError("provide num_res_blocks either as an int (globally constant) or "
502
- "as a list/tuple (per-level) with the same length as channel_mult")
503
- self.num_res_blocks = num_res_blocks
504
- if disable_self_attentions is not None:
505
- # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
506
- assert len(disable_self_attentions) == len(channel_mult)
507
- if num_attention_blocks is not None:
508
- assert len(num_attention_blocks) == len(self.num_res_blocks)
509
- assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
510
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
511
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
512
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
513
- f"attention will still not be set.")
514
-
515
- self.attention_resolutions = attention_resolutions
516
- self.dropout = dropout
517
- self.channel_mult = channel_mult
518
- self.conv_resample = conv_resample
519
- self.num_classes = num_classes
520
- self.use_checkpoint = use_checkpoint
521
- self.dtype = th.float16 if use_fp16 else th.float32
522
- self.num_heads = num_heads
523
- self.num_head_channels = num_head_channels
524
- self.num_heads_upsample = num_heads_upsample
525
- self.predict_codebook_ids = n_embed is not None
526
-
527
- time_embed_dim = model_channels * 4
528
- self.time_embed = nn.Sequential(
529
- linear(model_channels, time_embed_dim),
530
- nn.SiLU(),
531
- linear(time_embed_dim, time_embed_dim),
532
- )
533
-
534
- if self.num_classes is not None:
535
- if isinstance(self.num_classes, int):
536
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
537
- elif self.num_classes == "continuous":
538
- print("setting up linear c_adm embedding layer")
539
- self.label_emb = nn.Linear(1, time_embed_dim)
540
- else:
541
- raise ValueError()
542
-
543
- self.input_blocks = nn.ModuleList(
544
- [
545
- TimestepEmbedSequential(
546
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
547
- )
548
- ]
549
- )
550
- self._feature_size = model_channels
551
- input_block_chans = [model_channels]
552
- ch = model_channels
553
- ds = 1
554
- for level, mult in enumerate(channel_mult):
555
- for nr in range(self.num_res_blocks[level]):
556
- layers = [
557
- ResBlock(
558
- ch,
559
- time_embed_dim,
560
- dropout,
561
- out_channels=mult * model_channels,
562
- dims=dims,
563
- use_checkpoint=use_checkpoint,
564
- use_scale_shift_norm=use_scale_shift_norm,
565
- )
566
- ]
567
- ch = mult * model_channels
568
- if ds in attention_resolutions:
569
- if num_head_channels == -1:
570
- dim_head = ch // num_heads
571
- else:
572
- num_heads = ch // num_head_channels
573
- dim_head = num_head_channels
574
- if legacy:
575
- #num_heads = 1
576
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
577
- if exists(disable_self_attentions):
578
- disabled_sa = disable_self_attentions[level]
579
- else:
580
- disabled_sa = False
581
-
582
- if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
583
- layers.append(
584
- AttentionBlock(
585
- ch,
586
- use_checkpoint=use_checkpoint,
587
- num_heads=num_heads,
588
- num_head_channels=dim_head,
589
- use_new_attention_order=use_new_attention_order,
590
- ) if not use_spatial_transformer else SpatialTransformer(
591
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
592
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
593
- use_checkpoint=use_checkpoint
594
- )
595
- )
596
- self.input_blocks.append(TimestepEmbedSequential(*layers))
597
- self._feature_size += ch
598
- input_block_chans.append(ch)
599
- if level != len(channel_mult) - 1:
600
- out_ch = ch
601
- self.input_blocks.append(
602
- TimestepEmbedSequential(
603
- ResBlock(
604
- ch,
605
- time_embed_dim,
606
- dropout,
607
- out_channels=out_ch,
608
- dims=dims,
609
- use_checkpoint=use_checkpoint,
610
- use_scale_shift_norm=use_scale_shift_norm,
611
- down=True,
612
- )
613
- if resblock_updown
614
- else Downsample(
615
- ch, conv_resample, dims=dims, out_channels=out_ch
616
- )
617
- )
618
- )
619
- ch = out_ch
620
- input_block_chans.append(ch)
621
- ds *= 2
622
- self._feature_size += ch
623
-
624
- if num_head_channels == -1:
625
- dim_head = ch // num_heads
626
- else:
627
- num_heads = ch // num_head_channels
628
- dim_head = num_head_channels
629
- if legacy:
630
- #num_heads = 1
631
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
632
- self.middle_block = TimestepEmbedSequential(
633
- ResBlock(
634
- ch,
635
- time_embed_dim,
636
- dropout,
637
- dims=dims,
638
- use_checkpoint=use_checkpoint,
639
- use_scale_shift_norm=use_scale_shift_norm,
640
- ),
641
- AttentionBlock(
642
- ch,
643
- use_checkpoint=use_checkpoint,
644
- num_heads=num_heads,
645
- num_head_channels=dim_head,
646
- use_new_attention_order=use_new_attention_order,
647
- ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
648
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
649
- disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
650
- use_checkpoint=use_checkpoint
651
- ),
652
- ResBlock(
653
- ch,
654
- time_embed_dim,
655
- dropout,
656
- dims=dims,
657
- use_checkpoint=use_checkpoint,
658
- use_scale_shift_norm=use_scale_shift_norm,
659
- ),
660
- )
661
- self._feature_size += ch
662
-
663
- self.output_blocks = nn.ModuleList([])
664
- for level, mult in list(enumerate(channel_mult))[::-1]:
665
- for i in range(self.num_res_blocks[level] + 1):
666
- ich = input_block_chans.pop()
667
- layers = [
668
- ResBlock(
669
- ch + ich,
670
- time_embed_dim,
671
- dropout,
672
- out_channels=model_channels * mult,
673
- dims=dims,
674
- use_checkpoint=use_checkpoint,
675
- use_scale_shift_norm=use_scale_shift_norm,
676
- )
677
- ]
678
- ch = model_channels * mult
679
- if ds in attention_resolutions:
680
- if num_head_channels == -1:
681
- dim_head = ch // num_heads
682
- else:
683
- num_heads = ch // num_head_channels
684
- dim_head = num_head_channels
685
- if legacy:
686
- #num_heads = 1
687
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
688
- if exists(disable_self_attentions):
689
- disabled_sa = disable_self_attentions[level]
690
- else:
691
- disabled_sa = False
692
-
693
- if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
694
- layers.append(
695
- AttentionBlock(
696
- ch,
697
- use_checkpoint=use_checkpoint,
698
- num_heads=num_heads_upsample,
699
- num_head_channels=dim_head,
700
- use_new_attention_order=use_new_attention_order,
701
- ) if not use_spatial_transformer else SpatialTransformer(
702
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
703
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
704
- use_checkpoint=use_checkpoint
705
- )
706
- )
707
- if level and i == self.num_res_blocks[level]:
708
- out_ch = ch
709
- layers.append(
710
- ResBlock(
711
- ch,
712
- time_embed_dim,
713
- dropout,
714
- out_channels=out_ch,
715
- dims=dims,
716
- use_checkpoint=use_checkpoint,
717
- use_scale_shift_norm=use_scale_shift_norm,
718
- up=True,
719
- )
720
- if resblock_updown
721
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
722
- )
723
- ds //= 2
724
- self.output_blocks.append(TimestepEmbedSequential(*layers))
725
- self._feature_size += ch
726
-
727
- self.out = nn.Sequential(
728
- normalization(ch),
729
- nn.SiLU(),
730
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
731
- )
732
- if self.predict_codebook_ids:
733
- self.id_predictor = nn.Sequential(
734
- normalization(ch),
735
- conv_nd(dims, model_channels, n_embed, 1),
736
- #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
737
- )
738
-
739
- def convert_to_fp16(self):
740
- """
741
- Convert the torso of the model to float16.
742
- """
743
- self.input_blocks.apply(convert_module_to_f16)
744
- self.middle_block.apply(convert_module_to_f16)
745
- self.output_blocks.apply(convert_module_to_f16)
746
-
747
- def convert_to_fp32(self):
748
- """
749
- Convert the torso of the model to float32.
750
- """
751
- self.input_blocks.apply(convert_module_to_f32)
752
- self.middle_block.apply(convert_module_to_f32)
753
- self.output_blocks.apply(convert_module_to_f32)
754
-
755
- def forward(self, x, timesteps=None, context=None, y=None, features_adapter=None, append_to_context=None, **kwargs):
756
- """
757
- Apply the model to an input batch.
758
- :param x: an [N x C x ...] Tensor of inputs.
759
- :param timesteps: a 1-D batch of timesteps.
760
- :param context: conditioning plugged in via crossattn
761
- :param y: an [N] Tensor of labels, if class-conditional.
762
- :return: an [N x C x ...] Tensor of outputs.
763
- """
764
- assert (y is not None) == (
765
- self.num_classes is not None
766
- ), "must specify y if and only if the model is class-conditional"
767
- hs = []
768
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
769
- emb = self.time_embed(t_emb)
770
-
771
- if self.num_classes is not None:
772
- assert y.shape[0] == x.shape[0]
773
- emb = emb + self.label_emb(y)
774
-
775
- h = x.type(self.dtype)
776
-
777
- if append_to_context is not None:
778
- context = torch.cat([context, append_to_context], dim=1)
779
-
780
- adapter_idx = 0
781
- for id, module in enumerate(self.input_blocks):
782
- h = module(h, emb, context)
783
- if ((id+1)%3 == 0) and features_adapter is not None:
784
- h = h + features_adapter[adapter_idx]
785
- adapter_idx += 1
786
- hs.append(h)
787
- if features_adapter is not None:
788
- assert len(features_adapter)==adapter_idx, 'Wrong features_adapter'
789
-
790
- h = self.middle_block(h, emb, context)
791
- for module in self.output_blocks:
792
- h = th.cat([h, hs.pop()], dim=1)
793
- h = module(h, emb, context)
794
- h = h.type(x.dtype)
795
- if self.predict_codebook_ids:
796
- return self.id_predictor(h)
797
- else:
798
- return self.out(h)
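A minimal smoke-test sketch for the UNetModel above. The configuration values are illustrative, and it assumes the surrounding ldm package imports resolve.

import torch

unet = UNetModel(
    image_size=32, in_channels=4, model_channels=64, out_channels=4,
    num_res_blocks=1, attention_resolutions=(2,), channel_mult=(1, 2),
    num_heads=4)

x = torch.randn(2, 4, 32, 32)      # e.g. a batch of latents
t = torch.randint(0, 1000, (2,))   # one timestep per sample
eps = unet(x, timesteps=t)         # -> shape (2, 4, 32, 32)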
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/Reset.js DELETED
@@ -1,16 +0,0 @@
1
- /*
2
- 1. Destroy all chess
3
- 2. Fill chess
4
- 3. Break match3
5
- */
6
-
7
- var Reset = function() {
8
- // Destroy all chess
9
- this.board.removeAllChess();
10
- // Fill chess (with initial symbol map)
11
- this.fill(this.initSymbolsMap);
12
- // Break match3
13
- this.breakMatch3();
14
- }
15
-
16
- export default Reset;
 
spaces/Ahmedmewloud/Depplearnig/traduction.py DELETED
@@ -1,713 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """Traduction.ipynb
3
-
4
- Automatically generated by Colaboratory.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1qOS7cqek1bQPypxFqx-9G1ApPANNHL2X
8
- """
9
-
10
- !pip install "tensorflow-text>=2.11"
11
- !pip install einops
12
-
13
- from google.colab import drive
14
- drive.mount('/content/drive')
15
-
16
- import numpy as np
17
-
18
- import typing
19
- from typing import Any, Tuple
20
-
21
-
22
-
23
-
24
-
25
- import numpy as np
26
-
27
- import typing
28
- from typing import Any, Tuple
29
-
30
- import tensorflow as tf
31
-
32
- import tensorflow_text as tf_text
33
- import einops
34
- import matplotlib.pyplot as plt
35
- import matplotlib.ticker as ticker
36
-
37
- import tensorflow as tf
38
-
39
-
40
- #import tensorflow_text as tf_text
41
-
42
- class ShapeChecker():
43
- def __init__(self):
44
- # Keep a cache of every axis-name seen
45
- self.shapes = {}
46
-
47
- def __call__(self, tensor, names, broadcast=False):
48
- if not tf.executing_eagerly():
49
- return
50
-
51
- parsed = einops.parse_shape(tensor, names)
52
-
53
- for name, new_dim in parsed.items():
54
- old_dim = self.shapes.get(name, None)
55
-
56
- if (broadcast and new_dim == 1):
57
- continue
58
-
59
- if old_dim is None:
60
- # If the axis name is new, add its length to the cache.
61
- self.shapes[name] = new_dim
62
- continue
63
-
64
- if new_dim != old_dim:
65
- raise ValueError(f"Shape mismatch for dimension: '{name}'\n"
66
- f" found: {new_dim}\n"
67
- f" expected: {old_dim}\n")
68
-
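For context, a quick illustration of how ShapeChecker is meant to be used (values are illustrative):

import tensorflow as tf

checker = ShapeChecker()
checker(tf.zeros([4, 7, 16]), 'batch t units')    # caches batch=4, t=7, units=16
checker(tf.zeros([4, 9, 16]), 'batch s units')    # ok: 's' is a new axis name
# checker(tf.zeros([5, 7, 16]), 'batch t units')  # would raise: batch 5 != 4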
69
- """pour les donnees nous utilisons une api par Anki """
70
-
71
- # Download the training data
72
- import os
73
-
74
- if not os.path.isfile('/content/fra.txt'):
75
- !wget http://www.manythings.org/anki/fra-eng.zip -P /content
76
- !unzip /content/fra-eng.zip -d /content
77
- else:
78
- print('File already downloaded and extracted.')
79
-
80
- path_to_file ='/content/fra.txt'
81
-
82
-
83
-
84
- from pathlib import Path
85
- import numpy as np
86
-
87
- """la fonction load_data(path) une fonction qui retourne un array numpy tel un paire ( pahrse en fr == > phrase en eng )"""
88
-
89
- def load_data(path):
90
- path = Path(path)
91
- text = path.read_text(encoding='utf-8')
92
-
93
- lines = text.splitlines()
94
- pairs = [line.split('\t') for line in lines]
95
- # print(pairs[2])
96
- context = np.array([pairs[index][1] for index in range(len(pairs))])
97
- target = np.array([pairs[index][0] for index in range(len(pairs))])
98
-
99
- return target, context
100
-
101
- """un test d'affichage"""
102
-
103
- # targ, inp = load_data(path_to_file)
104
- target_raw, context_raw = load_data(path_to_file)
105
-
106
- print(len(context_raw),len(target_raw))
107
- for i in range(100):
108
- print(context_raw[i]+'\t')
109
- print(target_raw[i]+'\n')
110
-
111
- BUFFER_SIZE = len(context_raw)
112
- BATCH_SIZE = 64
113
-
114
- is_train = np.random.uniform(size=(len(target_raw),)) < 0.8
115
-
116
- train_raw = (
117
- tf.data.Dataset
118
- .from_tensor_slices((context_raw[is_train], target_raw[is_train]))
119
- .shuffle(BUFFER_SIZE)
120
- .batch(BATCH_SIZE))
121
- val_raw = (
122
- tf.data.Dataset
123
- .from_tensor_slices((context_raw[~is_train], target_raw[~is_train]))
124
- .shuffle(BUFFER_SIZE)
125
- .batch(BATCH_SIZE))
126
-
127
- for example_context_strings, example_target_strings in train_raw.take(1):
128
- print(example_context_strings[:5])
129
- print()
130
- print(example_target_strings[:5])
131
- break
132
-
133
- example_text = tf.constant('Salut Prenez vos jambes à vos cous !')
134
-
135
- print(example_text.numpy())
136
- print(tf_text.normalize_utf8(example_text, 'NFKD').numpy())
137
-
138
- # Text normalization
139
- def tf_lower_and_split_punct(text):
140
- # Split accented characters.
141
- text = tf_text.normalize_utf8(text, 'NFKD')
142
- text = tf.strings.lower(text)
143
- # Keep space, a to z, and select punctuation.
144
- text = tf.strings.regex_replace(text, '[^ a-z.?!,¿]', '')
145
- # Add spaces around punctuation.
146
- text = tf.strings.regex_replace(text, '[.?!,¿]', r' \0 ')
147
- # Strip whitespace.
148
- text = tf.strings.strip(text)
149
-
150
- text = tf.strings.join(['[START]', text, '[END]'], separator=' ')
151
- return text
152
-
153
- # Before normalization
154
- print(example_text.numpy().decode())
155
- # After normalization
156
- print(tf_lower_and_split_punct(example_text).numpy().decode())
157
-
158
- # Text vectorization
159
- max_vocab_size = 5000
160
-
161
- input_text_processor = tf.keras.layers.TextVectorization(
162
- standardize=tf_lower_and_split_punct,
163
- max_tokens=max_vocab_size)
164
-
165
- max_vocab_size = 5000
166
-
167
- context_text_processor = tf.keras.layers.TextVectorization(
168
- standardize=tf_lower_and_split_punct,
169
- max_tokens=max_vocab_size,
170
- ragged=True)
171
-
172
- context_text_processor.adapt(train_raw.map(lambda context, target: context))
173
-
174
- # Here are the first 10 words from the vocabulary:
175
- context_text_processor.get_vocabulary()[:10]
176
-
177
- target_text_processor = tf.keras.layers.TextVectorization(
178
- standardize=tf_lower_and_split_punct,
179
- max_tokens=max_vocab_size,
180
- ragged=True)
181
-
182
- target_text_processor.adapt(train_raw.map(lambda context, target: target))
183
- target_text_processor.get_vocabulary()[:10]
184
-
185
- example_tokens = context_text_processor(example_context_strings)
186
- example_tokens[:3, :]
187
-
188
- context_vocab = np.array(context_text_processor.get_vocabulary())
189
- tokens = context_vocab[example_tokens[0].numpy()]
190
- ' '.join(tokens)
191
-
192
- plt.subplot(1, 2, 1)
193
- plt.pcolormesh(example_tokens.to_tensor())
194
- plt.title('Token IDs')
195
-
196
- plt.subplot(1, 2, 2)
197
- plt.pcolormesh(example_tokens.to_tensor() != 0)
198
- plt.title('Mask')
199
-
200
- def process_text(context, target):
201
- context = context_text_processor(context).to_tensor()
202
- target = target_text_processor(target)
203
- targ_in = target[:,:-1].to_tensor()
204
- targ_out = target[:,1:].to_tensor()
205
- return (context, targ_in), targ_out
206
-
207
-
208
- train_ds = train_raw.map(process_text, tf.data.AUTOTUNE)
209
- val_ds = val_raw.map(process_text, tf.data.AUTOTUNE)
210
-
211
- for (ex_context_tok, ex_tar_in), ex_tar_out in train_ds.take(1):
212
- print(ex_context_tok[0, :10].numpy())
213
- print()
214
- print(ex_tar_in[0, :10].numpy())
215
- print(ex_tar_out[0, :10].numpy())
216
-
217
- UNITS = 256
218
-
219
-
220
-
221
- """Fin 21114
222
-
223
- # **Encoder/decoder**
224
-
225
- **Before going into the details, we define constants for the model:**
226
- """
227
-
228
- UNITS = 256
229
-
230
- """Un RNN bidirectionnel
231
-
232
- **The** encoder
233
- """
234
-
235
- class Encoder(tf.keras.layers.Layer):
236
- def __init__(self, text_processor, units):
237
- super(Encoder, self).__init__()
238
- self.text_processor = text_processor
239
- self.vocab_size = text_processor.vocabulary_size()
240
- self.units = units
241
-
242
- # The embedding layer converts tokens to vectors
243
- self.embedding = tf.keras.layers.Embedding(self.vocab_size, units,
244
- mask_zero=True)
245
-
246
- # The RNN layer processes those vectors sequentially.
247
- self.rnn = tf.keras.layers.Bidirectional(
248
- merge_mode='sum',
249
- layer=tf.keras.layers.GRU(units,
250
- # Return the sequence and state
251
- return_sequences=True,
252
- recurrent_initializer='glorot_uniform'))
253
-
254
- def call(self, x):
255
- shape_checker = ShapeChecker()
256
- shape_checker(x, 'batch s')
257
-
258
- # 2. The embedding layer looks up the embedding vector for each token.
259
- x = self.embedding(x)
260
- shape_checker(x, 'batch s units')
261
-
262
- # 3. The GRU processes the sequence of embeddings.
263
- x = self.rnn(x)
264
- shape_checker(x, 'batch s units')
265
-
266
- # 4. Returns the new sequence of embeddings.
267
- return x
268
-
269
- def convert_input(self, texts):
270
- texts = tf.convert_to_tensor(texts)
271
- if len(texts.shape) == 0:
272
- texts = tf.convert_to_tensor(texts)[tf.newaxis]
273
- context = self.text_processor(texts).to_tensor()
274
- context = self(context)
275
- return context
276
-
277
- # Encode the input sequence.
278
- encoder = Encoder(context_text_processor, UNITS)
279
- ex_context = encoder(ex_context_tok)
280
-
281
- print(f'Context tokens, shape (batch, s): {ex_context_tok.shape}')
282
- print(f'Encoder output, shape (batch, s, units): {ex_context.shape}')
283
-
284
- """
285
-
286
- La couche d'**attention**"""
287
-
288
- class CrossAttention(tf.keras.layers.Layer):
289
- def __init__(self, units, **kwargs):
290
- super().__init__()
291
- self.mha = tf.keras.layers.MultiHeadAttention(key_dim=units, num_heads=1, **kwargs)
292
- self.layernorm = tf.keras.layers.LayerNormalization()
293
- self.add = tf.keras.layers.Add()
294
-
295
- def call(self, x, context):
296
- shape_checker = ShapeChecker()
297
-
298
- shape_checker(x, 'batch t units')
299
- shape_checker(context, 'batch s units')
300
-
301
- attn_output, attn_scores = self.mha(
302
- query=x,
303
- value=context,
304
- return_attention_scores=True)
305
-
306
- shape_checker(x, 'batch t units')
307
- shape_checker(attn_scores, 'batch heads t s')
308
-
309
- # Cache the attention scores for plotting later.
310
- attn_scores = tf.reduce_mean(attn_scores, axis=1)
311
- shape_checker(attn_scores, 'batch t s')
312
- self.last_attention_weights = attn_scores
313
-
314
- x = self.add([x, attn_output])
315
- x = self.layernorm(x)
316
-
317
- return x
318
-
319
- attention_layer = CrossAttention(UNITS)
320
-
321
- # Attend to the encoded tokens
322
- embed = tf.keras.layers.Embedding(target_text_processor.vocabulary_size(),
323
- output_dim=UNITS, mask_zero=True)
324
- ex_tar_embed = embed(ex_tar_in)
325
-
326
- result = attention_layer(ex_tar_embed, ex_context)
327
-
328
- print(f'Context sequence, shape (batch, s, units): {ex_context.shape}')
329
- print(f'Target sequence, shape (batch, t, units): {ex_tar_embed.shape}')
330
- print(f'Attention result, shape (batch, t, units): {result.shape}')
331
- print(f'Attention weights, shape (batch, t, s): {attention_layer.last_attention_weights.shape}')
332
-
333
- attention_layer.last_attention_weights[0].numpy().sum(axis=-1)
334
-
335
- attention_weights = attention_layer.last_attention_weights
336
- mask=(ex_context_tok != 0).numpy()
337
-
338
- plt.subplot(1, 2, 1)
339
- plt.pcolormesh(mask*attention_weights[:, 0, :])
340
- plt.title('Attention weights')
341
-
342
- plt.subplot(1, 2, 2)
343
- plt.pcolormesh(mask)
344
- plt.title('Mask');
345
-
346
- """Un RNN unidirectionnel
347
-
348
- the **Decoder**
349
- """
350
-
351
- class Decoder(tf.keras.layers.Layer):
352
- @classmethod
353
- def add_method(cls, fun):
354
- setattr(cls, fun.__name__, fun)
355
- return fun
356
-
357
- def __init__(self, text_processor, units):
358
- super(Decoder, self).__init__()
359
- self.text_processor = text_processor
360
- self.vocab_size = text_processor.vocabulary_size()
361
- self.word_to_id = tf.keras.layers.StringLookup(
362
- vocabulary=text_processor.get_vocabulary(),
363
- mask_token='', oov_token='[UNK]')
364
- self.id_to_word = tf.keras.layers.StringLookup(
365
- vocabulary=text_processor.get_vocabulary(),
366
- mask_token='', oov_token='[UNK]',
367
- invert=True)
368
- self.start_token = self.word_to_id('[START]')
369
- self.end_token = self.word_to_id('[END]')
370
-
371
- self.units = units
372
-
373
-
374
- # 1. The embedding layer converts token IDs to vectors
375
- self.embedding = tf.keras.layers.Embedding(self.vocab_size,
376
- units, mask_zero=True)
377
-
378
- # 2. The RNN keeps track of what's been generated so far.
379
- self.rnn = tf.keras.layers.GRU(units,
380
- return_sequences=True,
381
- return_state=True,
382
- recurrent_initializer='glorot_uniform')
383
-
384
- # 3. The RNN output will be the query for the attention layer.
385
- self.attention = CrossAttention(units)
386
-
387
- # 4. This fully connected layer produces the logits for each
388
- # output token.
389
- self.output_layer = tf.keras.layers.Dense(self.vocab_size)
390
-
391
- """**Training**"""
392
-
393
- @Decoder.add_method
394
- def call(self,
395
- context, x,
396
- state=None,
397
- return_state=False):
398
- shape_checker = ShapeChecker()
399
- shape_checker(x, 'batch t')
400
- shape_checker(context, 'batch s units')
401
-
402
- # 1. Lookup the embeddings
403
- x = self.embedding(x)
404
- shape_checker(x, 'batch t units')
405
-
406
- # 2. Process the target sequence.
407
- x, state = self.rnn(x, initial_state=state)
408
- shape_checker(x, 'batch t units')
409
-
410
- # 3. Use the RNN output as the query for the attention over the context.
411
- x = self.attention(x, context)
412
- self.last_attention_weights = self.attention.last_attention_weights
413
- shape_checker(x, 'batch t units')
414
- shape_checker(self.last_attention_weights, 'batch t s')
415
-
416
- # Step 4. Generate logit predictions for the next token.
417
- logits = self.output_layer(x)
418
- shape_checker(logits, 'batch t target_vocab_size')
419
-
420
- if return_state:
421
- return logits, state
422
- else:
423
- return logits
424
-
425
- decoder = Decoder(target_text_processor, UNITS)
426
-
427
- logits = decoder(ex_context, ex_tar_in)
428
-
429
- print(f'encoder output shape: (batch, s, units) {ex_context.shape}')
430
- print(f'input target tokens shape: (batch, t) {ex_tar_in.shape}')
431
- print(f'logits shape shape: (batch, target_vocabulary_size) {logits.shape}')
432
-
433
- """**Inference**"""
434
-
435
- @Decoder.add_method
436
- def get_initial_state(self, context):
437
- batch_size = tf.shape(context)[0]
438
- start_tokens = tf.fill([batch_size, 1], self.start_token)
439
- done = tf.zeros([batch_size, 1], dtype=tf.bool)
440
- embedded = self.embedding(start_tokens)
441
- return start_tokens, done, self.rnn.get_initial_state(embedded)[0]
442
-
443
- @Decoder.add_method
444
- def tokens_to_text(self, tokens):
445
- words = self.id_to_word(tokens)
446
- result = tf.strings.reduce_join(words, axis=-1, separator=' ')
447
- result = tf.strings.regex_replace(result, '^ *\[START\] *', '')
448
- result = tf.strings.regex_replace(result, ' *\[END\] *$', '')
449
- return result
450
-
451
- @Decoder.add_method
452
- def get_next_token(self, context, next_token, done, state, temperature = 0.0):
453
- logits, state = self(
454
- context, next_token,
455
- state = state,
456
- return_state=True)
457
-
458
- if temperature == 0.0:
459
- next_token = tf.argmax(logits, axis=-1)
460
- else:
461
- logits = logits[:, -1, :]/temperature
462
- next_token = tf.random.categorical(logits, num_samples=1)
463
-
464
- # If a sequence produces an `end_token`, set it `done`
465
- done = done | (next_token == self.end_token)
466
- # Once a sequence is done it only produces 0-padding.
467
- next_token = tf.where(done, tf.constant(0, dtype=tf.int64), next_token)
468
-
469
- return next_token, done, state
470
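Editor's note on the temperature branch above: dividing the logits by a temperature below 1 sharpens the softmax toward the most likely token, while a temperature above 1 flattens it, which is why `temperature == 0.0` is handled as a pure argmax. A minimal, self-contained NumPy sketch (the logit values here are invented for illustration):

```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.5])
for t in (2.0, 1.0, 0.5):
    # Lower temperatures concentrate probability mass on the argmax.
    print(t, np.round(softmax(logits / t), 3))
```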
- 
- # Set up the loop variables.
- next_token, done, state = decoder.get_initial_state(ex_context)
- tokens = []
- 
- for n in range(10):
-     # Run one step.
-     next_token, done, state = decoder.get_next_token(
-         ex_context, next_token, done, state, temperature=1.0)
-     # Add the token to the output.
-     tokens.append(next_token)
- 
- # Stack all the tokens together.
- tokens = tf.concat(tokens, axis=-1)  # (batch, t)
- 
- # Convert the tokens back to a string.
- result = decoder.tokens_to_text(tokens)
- result[:3].numpy()
- 
- """### End 21196"""
- 
- class Translator(tf.keras.Model):
-     @classmethod
-     def add_method(cls, fun):
-         setattr(cls, fun.__name__, fun)
-         return fun
- 
-     def __init__(self, units,
-                  context_text_processor,
-                  target_text_processor):
-         super().__init__()
-         # Build the encoder and decoder.
-         encoder = Encoder(context_text_processor, units)
-         decoder = Decoder(target_text_processor, units)
- 
-         self.encoder = encoder
-         self.decoder = decoder
- 
-     def call(self, inputs):
-         context, x = inputs
-         context = self.encoder(context)
-         logits = self.decoder(context, x)
- 
-         # TODO(b/250038731): remove this
-         try:
-             # Delete the keras mask, so keras doesn't scale the loss+accuracy.
-             del logits._keras_mask
-         except AttributeError:
-             pass
- 
-         return logits
- 
- """Needs clarification."""
- 
- model = Translator(UNITS, context_text_processor, target_text_processor)
- 
- logits = model((ex_context_tok, ex_tar_in))
- 
- print(f'Context tokens, shape: (batch, s) {ex_context_tok.shape}')
- print(f'Target tokens, shape: (batch, t) {ex_tar_in.shape}')
- print(f'logits, shape: (batch, t, target_vocabulary_size) {logits.shape}')
- 
- def masked_loss(y_true, y_pred):
-     # Calculate the loss for each item in the batch.
-     loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
-         from_logits=True, reduction='none')
-     loss = loss_fn(y_true, y_pred)
- 
-     # Mask off the losses on padding.
-     mask = tf.cast(y_true != 0, loss.dtype)
-     loss *= mask
- 
-     # Return the mean loss per real (non-padding) token.
-     return tf.reduce_sum(loss) / tf.reduce_sum(mask)
- 
- def masked_acc(y_true, y_pred):
-     # Calculate per-token accuracy, ignoring padding.
-     y_pred = tf.argmax(y_pred, axis=-1)
-     y_pred = tf.cast(y_pred, y_true.dtype)
- 
-     match = tf.cast(y_true == y_pred, tf.float32)
-     mask = tf.cast(y_true != 0, tf.float32)
- 
-     return tf.reduce_sum(match) / tf.reduce_sum(mask)
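Editor's note: to make the masking in `masked_loss` concrete, here is a minimal, standalone check (the toy 3-token vocabulary and labels are invented for illustration). Padding positions (label 0) are zeroed out, so the result is the mean loss over real tokens only:

```python
import tensorflow as tf

y_true = tf.constant([[2, 1, 0, 0]])   # two real tokens followed by two padding tokens
y_pred = tf.random.normal([1, 4, 3])   # (batch, t, vocab) logits for a toy 3-token vocabulary

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')
loss = loss_fn(y_true, y_pred)         # per-token losses, shape (1, 4)
mask = tf.cast(y_true != 0, loss.dtype)

# Averages over the 2 real tokens only, not all 4 positions.
print(tf.reduce_sum(loss * mask) / tf.reduce_sum(mask))
```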
- 
- """Compile the model."""
- 
- model.compile(optimizer='adam',
-               loss=masked_loss,
-               metrics=[masked_acc, masked_loss])
- 
- """Compute the expected baseline metrics for an untrained model."""
- 
- vocab_size = 1.0 * target_text_processor.vocabulary_size()
- 
- {"expected_loss": tf.math.log(vocab_size).numpy(),
-  "expected_acc": 1 / vocab_size}
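Editor's note: these baselines follow because an untrained model is roughly uniform over the vocabulary. Choosing among V tokens with probability 1/V each gives a per-token cross-entropy of -log(1/V) = log V and an accuracy of 1/V. With a hypothetical vocabulary of 5,000 tokens, for instance, the expected starting loss is ln(5000) ≈ 8.5 and the expected accuracy is 0.0002; a model whose evaluation stays near these numbers has not learned anything yet.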
- 
- """Evaluate the model."""
- 
- model.evaluate(val_ds, steps=20, return_dict=True)
- 
- import os
- 
- # Check whether a saved-weights file already exists.
- if not os.path.exists('model_weights.h5'):
-     # No checkpoint yet: run the training.
-     history = model.fit(
-         train_ds.repeat(),
-         epochs=100,
-         steps_per_epoch=100,
-         validation_data=val_ds,
-         validation_steps=20,
-         callbacks=[
-             tf.keras.callbacks.EarlyStopping(patience=3)])
- 
-     # Save the model weights.
-     model.save_weights('model_weights.h5')
- else:
-     # A checkpoint exists: load it and skip training.
-     print("The model has already been trained. Moving on to the next step.")
-     model.load_weights('model_weights.h5')
-     history = None
- 
- # The training curves can only be plotted when training actually ran.
- if history is not None:
-     plt.plot(history.history['loss'], label='loss')
-     plt.plot(history.history['val_loss'], label='val_loss')
-     plt.ylim([0, max(plt.ylim())])
-     plt.xlabel('Epoch #')
-     plt.ylabel('CE/token')
-     plt.legend()
- 
-     plt.plot(history.history['masked_acc'], label='accuracy')
-     plt.plot(history.history['val_masked_acc'], label='val_accuracy')
-     plt.ylim([0, max(plt.ylim())])
-     plt.xlabel('Epoch #')
-     plt.ylabel('Accuracy')
-     plt.legend()
- 
- """Text translation."""
- 
- #@title
- @Translator.add_method
- def translate(self,
-               texts, *,
-               max_length=50,
-               temperature=0.0):
-     # Process the input texts.
-     context = self.encoder.convert_input(texts)
-     batch_size = tf.shape(texts)[0]
- 
-     # Set up the loop inputs.
-     tokens = []
-     attention_weights = []
-     next_token, done, state = self.decoder.get_initial_state(context)
- 
-     for _ in range(max_length):
-         # Generate the next token.
-         next_token, done, state = self.decoder.get_next_token(
-             context, next_token, done, state, temperature)
- 
-         # Collect the generated tokens.
-         tokens.append(next_token)
-         attention_weights.append(self.decoder.last_attention_weights)
- 
-         if tf.executing_eagerly() and tf.reduce_all(done):
-             break
- 
-     # Stack the lists of tokens and attention weights.
-     tokens = tf.concat(tokens, axis=-1)  # t*[(batch, 1)] -> (batch, t)
-     self.last_attention_weights = tf.concat(attention_weights, axis=1)  # t*[(batch, 1, s)] -> (batch, t, s)
- 
-     result = self.decoder.tokens_to_text(tokens)
-     return result
- 
- """Test the translate method."""
- 
- result = model.translate(['tu es dans la maison'])  # You are in the house.
- result[0].numpy().decode()
- 
- #@title
- @Translator.add_method
- def plot_attention(self, text, **kwargs):
-     assert isinstance(text, str)
-     output = self.translate([text], **kwargs)
-     output = output[0].numpy().decode()
- 
-     attention = self.last_attention_weights[0]
- 
-     context = tf_lower_and_split_punct(text)
-     context = context.numpy().decode().split()
- 
-     output = tf_lower_and_split_punct(output)
-     output = output.numpy().decode().split()[1:]
- 
-     fig = plt.figure(figsize=(10, 10))
-     ax = fig.add_subplot(1, 1, 1)
- 
-     ax.matshow(attention, cmap='viridis', vmin=0.0)
- 
-     fontdict = {'fontsize': 14}
- 
-     ax.set_xticklabels([''] + context, fontdict=fontdict, rotation=90)
-     ax.set_yticklabels([''] + output, fontdict=fontdict)
- 
-     ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
-     ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
- 
-     ax.set_xlabel('Input text')
-     ax.set_ylabel('Output text')
- 
- """A few tests."""
- 
- # Commented out IPython magic to ensure Python compatibility.
- # %%time
- # # Starting from these arrays of strings.
- # model.plot_attention('A partir de ces tableaux de chaînes ')
- #
- 
- # Commented out IPython magic to ensure Python compatibility.
- # %%time
- # # We are students of école polytechnique.
- # model.plot_attention("nous sommes des etudiants d'école polytechnique")
- 
- """End 21199"""
- 
- !pip install gradio
- 
- import gradio as gr
- 
- def translate_text(text):
-     result = model.translate([text])
-     translated_text = result[0].numpy().decode()
-     return translated_text
- 
- iface = gr.Interface(fn=translate_text, inputs="text", outputs="text", title="Translation App")
- iface.launch(debug=True, share=True)
 
spaces/Aityz/Aityz_Model_Eli5/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Aityz Model Eli5
- emoji: 💻
- colorFrom: blue
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Aityz/Aityz_Model_Eli5/app.py DELETED
@@ -1,20 +0,0 @@
- import gradio as gr
- from transformers import AutoTokenizer
- from transformers import AutoModelForCausalLM
- import torch
- 
- tokenizer = AutoTokenizer.from_pretrained("Aityz/Aityz_model_eli5")
- 
- model = AutoModelForCausalLM.from_pretrained("Aityz/Aityz_model_eli5")
- 
- # maxtokens = int(input('What would you like the max tokens to be (default: 100) '))
- 
- def aityz(input, maxtokens):
-     prompt = input
-     inputs = tokenizer(prompt, return_tensors="pt").input_ids
-     outputs = model.generate(inputs, max_new_tokens=maxtokens, do_sample=True, top_k=50, top_p=0.95)
-     output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
-     outputstr = ''.join(output)
-     return outputstr
- demo = gr.Interface(fn=aityz, inputs=["textbox", gr.Slider(1, 1000, value=100)], outputs="textbox")
- demo.launch()  # enable share=True for usage outside Hugging Face Spaces
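Editor's note on the generation call above: `top_k=50` restricts sampling to the 50 highest-probability tokens, and `top_p=0.95` to the smallest set whose cumulative probability reaches 0.95 (nucleus sampling). A simplified NumPy sketch of the top-p filtering step (illustrative only; the actual transformers implementation also combines it with top-k and temperature):

```python
import numpy as np

probs = np.array([0.50, 0.25, 0.15, 0.07, 0.03])  # sorted descending, sums to 1
cum = np.cumsum(probs)
keep = cum <= 0.95      # keep tokens while the running total stays within top_p
keep[0] = True          # always keep at least the most likely token
filtered = np.where(keep, probs, 0.0)
filtered /= filtered.sum()
print(filtered)         # probability mass renormalised over the kept tokens
```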
 
spaces/AlexWang/lama/bin/paper_runfiles/generate_val_test.sh DELETED
@@ -1,28 +0,0 @@
- #!/usr/bin/env bash
- 
- # !!! file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst
- 
- # paths to data are valid for mml7
- PLACES_ROOT="/data/inpainting/Places365"
- OUT_DIR="/data/inpainting/paper_data/Places365_val_test"
- 
- source "$(dirname $0)/env.sh"
- 
- for datadir in test_large_30k  # val_large
- do
-     for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512
-     do
-         "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
-             "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8
- 
-         "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-     done
- 
-     for conf in segm_256 segm_512
-     do
-         "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
-             "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2
- 
-         "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-     done
- done
 
spaces/Alican/pixera/options/base_options.py DELETED
@@ -1,141 +0,0 @@
- import argparse
- import os
- from util import util
- import torch
- import models
- import data
- 
- 
- class BaseOptions():
-     """This class defines options used during both training and test time.
- 
-     It also implements several helper functions such as parsing, printing, and saving the options.
-     It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
-     """
- 
-     def __init__(self):
-         """Reset the class; indicates the class hasn't been initialized."""
-         self.initialized = False
- 
-     def initialize(self, parser):
-         """Define the common options that are used in both training and test."""
-         # basic parameters
-         parser.add_argument('--dataroot', required=False, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
-         parser.add_argument('--name', type=str, default='pixera_CYCLEGAN', help='name of the experiment. It decides where to store samples and models')
-         parser.add_argument('--gpu_ids', type=str, default='-1', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
-         parser.add_argument('--checkpoints_dir', type=str, default='./models', help='models are saved here')
-         # model parameters
-         parser.add_argument('--model', type=str, default='test', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
-         parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
-         parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
-         parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
-         parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
-         parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
-         parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
-         parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
-         parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
-         parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
-         parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
-         parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
-         # dataset parameters
-         parser.add_argument('--dataset_mode', type=str, default='single', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
-         parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
-         parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
-         parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
-         parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
-         parser.add_argument('--load_size', type=int, default=512, help='scale images to this size')
-         parser.add_argument('--crop_size', type=int, default=512, help='then crop to this size')
-         parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
-         parser.add_argument('--preprocess', type=str, default='scale_width', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
-         parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
-         parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
-         # additional parameters
-         parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
-         parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
-         parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
-         parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
-         # wandb parameters
-         parser.add_argument('--use_wandb', action='store_true', help='if specified, then init wandb logging')
-         parser.add_argument('--wandb_project_name', type=str, default='CycleGAN-and-pix2pix', help='specify wandb project name')
-         self.initialized = True
-         return parser
- 
-     def gather_options(self):
-         """Initialize our parser with basic options (only once).
-         Add additional model-specific and dataset-specific options.
-         These options are defined in the <modify_commandline_options> function
-         in model and dataset classes.
-         """
-         if not self.initialized:  # check if it has been initialized
-             parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-             parser = self.initialize(parser)
- 
-         # get the basic options
-         opt, _ = parser.parse_known_args()
- 
-         # modify model-related parser options
-         model_name = opt.model
-         model_option_setter = models.get_option_setter(model_name)
-         parser = model_option_setter(parser, self.isTrain)
-         opt, _ = parser.parse_known_args()  # parse again with new defaults
- 
-         # modify dataset-related parser options
-         dataset_name = opt.dataset_mode
-         dataset_option_setter = data.get_option_setter(dataset_name)
-         parser = dataset_option_setter(parser, self.isTrain)
- 
-         # save and return the parser
-         self.parser = parser
-         return parser.parse_args()
- 
-     def print_options(self, opt):
-         """Print and save options.
- 
-         It will print both current options and default values (if different).
-         It will save options into a text file / [checkpoints_dir] / opt.txt
-         """
-         message = ''
-         message += '----------------- Options ---------------\n'
-         for k, v in sorted(vars(opt).items()):
-             comment = ''
-             default = self.parser.get_default(k)
-             if v != default:
-                 comment = '\t[default: %s]' % str(default)
-             message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
-         message += '----------------- End -------------------'
-         print(message)
- 
-         # save to the disk
-         expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
-         util.mkdirs(expr_dir)
-         file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
-         with open(file_name, 'wt') as opt_file:
-             opt_file.write(message)
-             opt_file.write('\n')
- 
-     def parse(self):
-         """Parse our options, create checkpoints directory suffix, and set up gpu device."""
-         opt = self.gather_options()
-         opt.isTrain = self.isTrain  # train or test
- 
-         # process opt.suffix
-         if opt.suffix:
-             suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
-             opt.name = opt.name + suffix
- 
-         self.print_options(opt)
- 
-         # set gpu ids
-         str_ids = opt.gpu_ids.split(',')
-         opt.gpu_ids = []
-         for str_id in str_ids:
-             id = int(str_id)
-             if id >= 0:
-                 opt.gpu_ids.append(id)
-         if len(opt.gpu_ids) > 0:
-             torch.cuda.set_device(opt.gpu_ids[0])
- 
-         self.opt = opt
-         return self.opt
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/data_utils.py DELETED
@@ -1,37 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
- 
- 
- import os
- 
- from PIL import Image
- 
- IMG_EXTENSIONS = [
-     '.jpg', '.JPG', '.jpeg', '.JPEG',
-     '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
- ]
- 
- 
- def is_image_file(filename):
-     return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
- 
- 
- def tensor2im(var):
-     # var shape: (3, H, W)
-     var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
-     var = ((var + 1) / 2)
-     var[var < 0] = 0
-     var[var > 1] = 1
-     var = var * 255
-     return Image.fromarray(var.astype('uint8'))
- 
- 
- def make_dataset(dir):
-     images = []
-     assert os.path.isdir(dir), '%s is not a valid directory' % dir
-     for root, _, fnames in sorted(os.walk(dir)):
-         for fname in fnames:
-             if is_image_file(fname):
-                 path = os.path.join(root, fname)
-                 fname = fname.split('.')[0]
-                 images.append((fname, path))
-     return images
 
spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py DELETED
@@ -1,16 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     backbone=dict(
-         plugins=[
-             dict(
-                 cfg=dict(
-                     type='GeneralizedAttention',
-                     spatial_range=-1,
-                     num_heads=8,
-                     attention_type='0010',
-                     kv_stride=2),
-                 stages=(False, False, True, True),
-                 position='after_conv2')
-         ],
-         dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
 
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://detectron2/resnet101_caffe',
-     backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_3x_ms_hybrid_base/config.py DELETED
@@ -1,82 +0,0 @@
- _base_ = [
-     '../../configs/_base_/models/mask_rcnn_uniformer_fpn.py',
-     '../../configs/_base_/datasets/coco_instance.py',
-     '../../configs/_base_/schedules/schedule_1x.py',
-     '../../configs/_base_/default_runtime.py'
- ]
- 
- model = dict(
-     backbone=dict(
-         embed_dim=[64, 128, 320, 512],
-         layers=[5, 8, 20, 7],
-         head_dim=64,
-         drop_path_rate=0.3,
-         use_checkpoint=True,
-         checkpoint_num=[0, 0, 20, 0],
-         windows=False,
-         hybrid=True,
-         window_size=14
-     ),
-     neck=dict(in_channels=[64, 128, 320, 512]))
- 
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- 
- # augmentation strategy originates from DETR / Sparse RCNN
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='AutoAugment',
-          policies=[
-              [
-                  dict(type='Resize',
-                       img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
-                                  (608, 1333), (640, 1333), (672, 1333), (704, 1333),
-                                  (736, 1333), (768, 1333), (800, 1333)],
-                       multiscale_mode='value',
-                       keep_ratio=True)
-              ],
-              [
-                  dict(type='Resize',
-                       img_scale=[(400, 1333), (500, 1333), (600, 1333)],
-                       multiscale_mode='value',
-                       keep_ratio=True),
-                  dict(type='RandomCrop',
-                       crop_type='absolute_range',
-                       crop_size=(384, 600),
-                       allow_negative_crop=True),
-                  dict(type='Resize',
-                       img_scale=[(480, 1333), (512, 1333), (544, 1333),
-                                  (576, 1333), (608, 1333), (640, 1333),
-                                  (672, 1333), (704, 1333), (736, 1333),
-                                  (768, 1333), (800, 1333)],
-                       multiscale_mode='value',
-                       override=True,
-                       keep_ratio=True)
-              ]
-          ]),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
- ]
- data = dict(train=dict(pipeline=train_pipeline))
- 
- optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
-                  paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                  'relative_position_bias_table': dict(decay_mult=0.),
-                                                  'norm': dict(decay_mult=0.)}))
- lr_config = dict(step=[27, 33])
- runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
- 
- # do not use mmdet version fp16
- fp16 = None
- optimizer_config = dict(
-     type="DistOptimizerHook",
-     update_interval=1,
-     grad_clip=None,
-     coalesce=True,
-     bucket_size_mb=-1,
-     use_fp16=True,
- )
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py DELETED
@@ -1,41 +0,0 @@
- import torch
- 
- from ..builder import BBOX_SAMPLERS
- from .base_sampler import BaseSampler
- from .sampling_result import SamplingResult
- 
- 
- @BBOX_SAMPLERS.register_module()
- class PseudoSampler(BaseSampler):
-     """A pseudo sampler that does not do sampling actually."""
- 
-     def __init__(self, **kwargs):
-         pass
- 
-     def _sample_pos(self, **kwargs):
-         """Sample positive samples."""
-         raise NotImplementedError
- 
-     def _sample_neg(self, **kwargs):
-         """Sample negative samples."""
-         raise NotImplementedError
- 
-     def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
-         """Directly returns the positive and negative indices of samples.
- 
-         Args:
-             assign_result (:obj:`AssignResult`): Assigned results
-             bboxes (torch.Tensor): Bounding boxes
-             gt_bboxes (torch.Tensor): Ground truth boxes
- 
-         Returns:
-             :obj:`SamplingResult`: sampler results
-         """
-         pos_inds = torch.nonzero(
-             assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
-         neg_inds = torch.nonzero(
-             assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
-         gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
-         sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
-                                          assign_result, gt_flags)
-         return sampling_result
 
spaces/AnnonSubmission/xai-cl/ssl_models/simclr2.py DELETED
@@ -1,214 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- 
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- 
- """
- from https://github.com/Separius/SimCLRv2-Pytorch
- """
- 
- BATCH_NORM_EPSILON = 1e-5
- BATCH_NORM_DECAY = 0.9  # == pytorch's default value as well
- 
- class BatchNormRelu(nn.Sequential):
- 
-     def __init__(self, num_channels, relu=True):
-         super().__init__(nn.BatchNorm2d(num_channels, eps=BATCH_NORM_EPSILON),
-                          nn.ReLU() if relu else nn.Identity())
- 
- 
- def conv(in_channels, out_channels, kernel_size=3, stride=1, bias=False):
-     return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
-                      stride=stride, padding=(kernel_size - 1) // 2, bias=bias)
- 
- 
- class SelectiveKernel(nn.Module):
- 
-     def __init__(self, in_channels, out_channels, stride, sk_ratio, min_dim=32):
-         super().__init__()
-         assert sk_ratio > 0.0
-         self.main_conv = nn.Sequential(conv(in_channels, 2 * out_channels, stride=stride),
-                                        BatchNormRelu(2 * out_channels))
-         mid_dim = max(int(out_channels * sk_ratio), min_dim)
-         self.mixing_conv = nn.Sequential(conv(out_channels, mid_dim, kernel_size=1),
-                                          BatchNormRelu(mid_dim),
-                                          conv(mid_dim, 2 * out_channels, kernel_size=1))
- 
-     def forward(self, x):
-         x = self.main_conv(x)
-         x = torch.stack(torch.chunk(x, 2, dim=1), dim=0)  # 2, B, C, H, W
-         g = x.sum(dim=0).mean(dim=[2, 3], keepdim=True)
-         m = self.mixing_conv(g)
-         m = torch.stack(torch.chunk(m, 2, dim=1), dim=0)  # 2, B, C, 1, 1
-         return (x * F.softmax(m, dim=0)).sum(dim=0)
- 
- 
- class Projection(nn.Module):
-     def __init__(self, in_channels, out_channels, stride, sk_ratio=0):
-         super().__init__()
-         if sk_ratio > 0:
-             self.shortcut = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)),
-                                           nn.AvgPool2d(kernel_size=2, stride=stride, padding=0),
-                                           conv(in_channels, out_channels, kernel_size=1))
-         else:
-             self.shortcut = conv(in_channels, out_channels, kernel_size=1, stride=stride)
- 
-         self.bn = BatchNormRelu(out_channels, relu=False)
- 
-     def forward(self, x):
-         return self.bn(self.shortcut(x))
- 
- 
- class BottleneckBlock(nn.Module):
-     expansion = 4
- 
-     def __init__(self, in_channels, out_channels, stride, sk_ratio=0, use_projection=False):
-         super().__init__()
-         if use_projection:
-             self.projection = Projection(in_channels, out_channels * 4, stride, sk_ratio)
-         else:
-             self.projection = nn.Identity()
- 
-         ops = [conv(in_channels, out_channels, kernel_size=1), BatchNormRelu(out_channels)]
-         if sk_ratio > 0:
-             ops.append(SelectiveKernel(out_channels, out_channels, stride, sk_ratio))
-         else:
-             ops.append(conv(out_channels, out_channels, stride=stride))
-         ops.append(BatchNormRelu(out_channels))
- 
-         ops.append(conv(out_channels, out_channels * 4, kernel_size=1))
-         ops.append(BatchNormRelu(out_channels * 4, relu=False))
-         self.net = nn.Sequential(*ops)
- 
-     def forward(self, x):
-         shortcut = self.projection(x)
-         return F.relu(shortcut + self.net(x))
- 
- 
- class Blocks(nn.Module):
-     def __init__(self, num_blocks, in_channels, out_channels, stride, sk_ratio=0):
-         super().__init__()
-         self.blocks = nn.ModuleList([BottleneckBlock(in_channels, out_channels, stride, sk_ratio, True)])
-         self.channels_out = out_channels * BottleneckBlock.expansion
-         for _ in range(num_blocks - 1):
-             self.blocks.append(BottleneckBlock(self.channels_out, out_channels, 1, sk_ratio))
- 
-     def forward(self, x):
-         for b in self.blocks:
-             x = b(x)
-         return x
- 
- 
- class Stem(nn.Sequential):
-     def __init__(self, sk_ratio, width_multiplier):
-         ops = []
-         channels = 64 * width_multiplier // 2
-         if sk_ratio > 0:
-             ops.append(conv(3, channels, stride=2))
-             ops.append(BatchNormRelu(channels))
-             ops.append(conv(channels, channels))
-             ops.append(BatchNormRelu(channels))
-             ops.append(conv(channels, channels * 2))
-         else:
-             ops.append(conv(3, channels * 2, kernel_size=7, stride=2))
-         ops.append(BatchNormRelu(channels * 2))
-         ops.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
-         super().__init__(*ops)
- 
- 
- class ResNet(nn.Module):
-     def __init__(self, layers, width_multiplier, sk_ratio):
-         super().__init__()
-         ops = [Stem(sk_ratio, width_multiplier)]
-         channels_in = 64 * width_multiplier
-         ops.append(Blocks(layers[0], channels_in, 64 * width_multiplier, 1, sk_ratio))
-         channels_in = ops[-1].channels_out
-         ops.append(Blocks(layers[1], channels_in, 128 * width_multiplier, 2, sk_ratio))
-         channels_in = ops[-1].channels_out
-         ops.append(Blocks(layers[2], channels_in, 256 * width_multiplier, 2, sk_ratio))
-         channels_in = ops[-1].channels_out
-         ops.append(Blocks(layers[3], channels_in, 512 * width_multiplier, 2, sk_ratio))
-         channels_in = ops[-1].channels_out
-         self.channels_out = channels_in
-         self.net = nn.Sequential(*ops)
-         self.fc = nn.Linear(channels_in, 1000)
- 
-     def forward(self, x, apply_fc=False):
-         h = self.net(x).mean(dim=[2, 3])
-         if apply_fc:
-             h = self.fc(h)
-         return h
- 
- 
- class ContrastiveHead(nn.Module):
-     def __init__(self, channels_in, out_dim=128, num_layers=3):
-         super().__init__()
-         self.layers = nn.ModuleList()
-         for i in range(num_layers):
-             if i != num_layers - 1:
-                 dim, relu = channels_in, True
-             else:
-                 dim, relu = out_dim, False
-             self.layers.append(nn.Linear(channels_in, dim, bias=False))
-             bn = nn.BatchNorm1d(dim, eps=BATCH_NORM_EPSILON, affine=True)
-             if i == num_layers - 1:
-                 nn.init.zeros_(bn.bias)
-             self.layers.append(bn)
-             if relu:
-                 self.layers.append(nn.ReLU())
- 
-     def forward(self, x):
-         for b in self.layers:
-             x = b(x)
-         return x
- 
- 
- def get_resnet(depth=50, width_multiplier=1, sk_ratio=0):  # sk_ratio=0.0625 is recommended
-     layers = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}[depth]
-     resnet = ResNet(layers, width_multiplier, sk_ratio)
-     return resnet, ContrastiveHead(resnet.channels_out)
- 
- 
- def name_to_params(checkpoint):
-     sk_ratio = 0.0625 if '_sk1' in checkpoint else 0
-     if 'r50_' in checkpoint:
-         depth = 50
-     elif 'r101_' in checkpoint:
-         depth = 101
-     elif 'r152_' in checkpoint:
-         depth = 152
-     else:
-         raise NotImplementedError
- 
-     if '_1x_' in checkpoint:
-         width = 1
-     elif '_2x_' in checkpoint:
-         width = 2
-     elif '_3x_' in checkpoint:
-         width = 3
-     else:
-         raise NotImplementedError
- 
-     return depth, width, sk_ratio
- 
- class SimCLRv2(nn.Module):
-     def __init__(self, model, head):
-         super(SimCLRv2, self).__init__()
- 
-         self.encoder = model
-         self.contrastive_head = head
- 
-     def forward(self, x):
-         x = self.encoder(x)
-         x = self.contrastive_head(x)
-         return x
- 
- def get_simclr2_model(ckpt_path):
-     depth, width, sk_ratio = name_to_params(ckpt_path)
-     model, head = get_resnet(depth, width, sk_ratio)
-     checkpoint = torch.load('pretrained_models/simclr2_models/' + ckpt_path)
-     model.load_state_dict(checkpoint['resnet'])
-     head.load_state_dict(checkpoint['head'])
-     del model.fc
-     simclr2 = SimCLRv2(model, head)
-     return simclr2.to(device)
 
spaces/Annotation-AI/segment-similarthings/app.py DELETED
@@ -1,17 +0,0 @@
- import os
- 
- 
- github_user = os.environ.get("GITHUB_USER")
- github_token = os.environ.get("GITHUB_TOKEN")
- 
- repo_name = "annotation-ai/mlwiz-technical-demo"
- 
- os.system(f"export GITHUB_USER={github_user}")
- os.system(f"export GITHUB_TOKEN={github_token}")
- os.system(f"git clone https://{github_user}:{github_token}@github.com/{repo_name}")
- 
- cwd0 = os.getcwd()
- cwd1 = os.path.join(cwd0, "mlwiz-technical-demo/sam")
- os.chdir(cwd1)
- os.system("pip install -r requirements.txt")
- os.system("python app_segment_similarthings.py")
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/options/base_options.py DELETED
@@ -1,134 +0,0 @@
- import argparse
- import os
- import torch
- import model
- from util import util
- 
- 
- class BaseOptions():
-     def __init__(self):
-         self.parser = argparse.ArgumentParser()
-         self.initialized = False
- 
-     def initialize(self, parser):
-         # base define
-         parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment.')
-         parser.add_argument('--model', type=str, default='tc', help='name of the model type. [pluralistic]')
-         parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
-         parser.add_argument('--which_iter', type=int, default='0', help='which iterations to load')
-         parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load')
-         parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0, 1, 2; use -1 for CPU')
-         # data define
-         parser.add_argument('--mask_type', type=int, default=[0, 1, 3], help='0: center, 1: regular, 2: irregular, 3: external')
-         parser.add_argument('--img_file', type=str, default='/data/dataset/train', help='training and testing dataset')
-         parser.add_argument('--mask_file', type=str, default='none', help='load test mask')
-         parser.add_argument('--img_nc', type=int, default=3, help='# of image channels')
-         parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='preprocessing image at load time')
-         parser.add_argument('--load_size', type=int, default=542, help='scale examples to this size')
-         parser.add_argument('--fine_size', type=int, default=512, help='then crop to this size')
-         parser.add_argument('--fixed_size', type=int, default=256, help='fixed the image size in S1 with transformer')
-         parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the image')
-         parser.add_argument('--data_powers', type=int, default=5, help='# times of the scale to 2 times')
-         parser.add_argument('--reverse_mask', action='store_true', help='if specified, randomly reverse the mask region')
-         parser.add_argument('--batch_size', type=int, default=8, help='input batch size')
-         parser.add_argument('--nThreads', type=int, default=8, help='# threads for loading data')
-         parser.add_argument('--no_shuffle', action='store_true', help='if true, takes examples serially')
-         # display parameter define
-         parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
-         parser.add_argument('--display_id', type=int, default=None, help='display id of the web')
-         parser.add_argument('--display_server', type=str, default="http://localhost", help='server of the web display')
-         parser.add_argument('--display_env', type=str, default='main', help='display name (default is "main")')
-         parser.add_argument('--display_port', type=int, default=8092, help='port of the web display')
-         parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all examples in a single visdom web panel')
-         # Encoder-Decoder define
-         parser.add_argument('--ngf', type=int, default=32, help='# of gen filters in the last conv layer')
-         parser.add_argument('--ndf', type=int, default=32, help='# of dis filters in the first conv layer')
-         parser.add_argument('--num_res_blocks', type=int, default=2, help='# of residual blocks in the encoder and decoder layers')
-         parser.add_argument('--netD', type=str, default='style', help='specify discriminator architecture')
-         parser.add_argument('--netG', type=str, default='diff', help='specify decoder architecture')
-         parser.add_argument('--netE', type=str, default='diff', help='specify encoder architecture')
-         parser.add_argument('--kernel_G', type=int, default=3, help='kernel size for the decoder')
-         parser.add_argument('--kernel_E', type=int, default=1, help='kernel size for the encoder')
-         parser.add_argument('--add_noise', action='store_true', help='if true, add noise to the decoder')
-         parser.add_argument('--attn_E', action='store_true', help='if true, use attention in the encoder')
-         parser.add_argument('--attn_G', action='store_true', help='if true, use attention in the decoder')
-         parser.add_argument('--attn_D', action='store_true', help='if true, use attention in the discriminator')
-         parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
-         parser.add_argument('--n_layers_G', type=int, default=4, help='# of down-sample layers in the Encoder and Decoder')
-         parser.add_argument('--norm', type=str, default='pixel', help='instance normalization or batch normalization [instance | batch | pixel | none]')
-         parser.add_argument('--activation', type=str, default='leakyrelu', help='activation layer [relu | gelu | leakyrelu | none]')
-         parser.add_argument('--init_type', type=str, default='kaiming', help='network initialization [normal | xavier | kaiming | orthogonal]')
-         parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
-         parser.add_argument('--lipip_path', type=str, default='./model/lpips/vgg.pth', help='the pretrained LPIPS model')
-         # Transformer define
-         parser.add_argument('--netT', type=str, default='original', help='specify transformer architecture')
-         parser.add_argument('--embed_dim', type=int, default=512, help='the number of embedding dimensions')
-         parser.add_argument('--dropout', type=float, default=0., help='the dropout probability in transformer')
-         parser.add_argument('--kernel_T', type=int, default=1, help='kernel size for the transformer projection')
-         parser.add_argument('--n_encoders', type=int, default=12, help='the number of encoders in transformer')
-         parser.add_argument('--n_decoders', type=int, default=0, help='the number of decoders in transformer')
-         parser.add_argument('--embed_type', type=str, default='learned', choices=['learned', 'sine'])
-         parser.add_argument('--top_k', type=int, default=10, help='sample the results from the top k values')
-         # VQ define
-         parser.add_argument('--num_embeds', type=int, default=1024, help='the number of words for an image')
-         parser.add_argument('--use_pos_G', action='store_true', help='if true, position embedding in G')
-         parser.add_argument('--word_size', type=int, default=16, help='the number of words for each image')
-         self.initialized = True
-         return parser
- 
-     def gather_options(self):
-         """Add additional model-specific options."""
-         if not self.initialized:
-             parser = self.initialize(self.parser)
- 
-         # get basic options
-         opt, _ = parser.parse_known_args()
- 
-         # modify the options for different models
-         model_option_set = model.get_option_setter(opt.model)
-         parser = model_option_set(parser, self.isTrain)
-         opt = parser.parse_args()
- 
-         return opt
- 
-     def parse(self):
-         """Parse the options."""
-         opt = self.gather_options()
-         opt.isTrain = self.isTrain
- 
-         self.print_options(opt)
- 
-         # set gpu ids
-         str_ids = opt.gpu_ids.split(',')
-         opt.gpu_ids = []
-         for str_id in str_ids:
-             id = int(str_id)
-             if id >= 0:
-                 opt.gpu_ids.append(id)
-         if len(opt.gpu_ids):
-             torch.cuda.set_device(opt.gpu_ids[0])
- 
-         self.opt = opt
- 
-         return self.opt
- 
-     @staticmethod
-     def print_options(opt):
-         """Print and save options."""
-         print('--------------Options--------------')
-         for k, v in sorted(vars(opt).items()):
-             print('%s: %s' % (str(k), str(v)))
-         print('----------------End----------------')
- 
-         # save to the disk
-         expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
-         util.mkdirs(expr_dir)
-         if opt.isTrain:
-             file_name = os.path.join(expr_dir, 'train_opt.txt')
-         else:
-             file_name = os.path.join(expr_dir, 'test_opt.txt')
-         with open(file_name, 'wt') as opt_file:
-             opt_file.write('--------------Options--------------\n')
-             for k, v in sorted(vars(opt).items()):
-                 opt_file.write('%s: %s\n' % (str(k), str(v)))
-             opt_file.write('----------------End----------------\n')
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py DELETED
@@ -1,28 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import pickle
- 
- from .base import BaseFileHandler
- 
- 
- class PickleHandler(BaseFileHandler):
- 
-     str_like = False
- 
-     def load_from_fileobj(self, file, **kwargs):
-         return pickle.load(file, **kwargs)
- 
-     def load_from_path(self, filepath, **kwargs):
-         return super(PickleHandler, self).load_from_path(
-             filepath, mode='rb', **kwargs)
- 
-     def dump_to_str(self, obj, **kwargs):
-         kwargs.setdefault('protocol', 2)
-         return pickle.dumps(obj, **kwargs)
- 
-     def dump_to_fileobj(self, obj, file, **kwargs):
-         kwargs.setdefault('protocol', 2)
-         pickle.dump(obj, file, **kwargs)
- 
-     def dump_to_path(self, obj, filepath, **kwargs):
-         super(PickleHandler, self).dump_to_path(
-             obj, filepath, mode='wb', **kwargs)
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/datasets/cocogrounding_eval.py DELETED
@@ -1,269 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Grounding DINO. Midified by Shilong Liu.
3
- # url: https://github.com/IDEA-Research/GroundingDINO
4
- # Copyright (c) 2023 IDEA. All Rights Reserved.
5
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- # ------------------------------------------------------------------------
7
- # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
8
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
9
- """
10
- COCO evaluator that works in distributed mode.
11
-
12
- Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
13
- The difference is that there is less copy-pasting from pycocotools
14
- in the end of the file, as python3 can suppress prints with contextlib
15
- """
16
- import contextlib
17
- import copy
18
- import os
19
-
20
- import numpy as np
21
- import pycocotools.mask as mask_util
22
- import torch
23
- from pycocotools.coco import COCO
24
- from pycocotools.cocoeval import COCOeval
25
-
26
- from groundingdino.util.misc import all_gather
27
-
28
-
29
- class CocoGroundingEvaluator(object):
30
- def __init__(self, coco_gt, iou_types, useCats=True):
31
- assert isinstance(iou_types, (list, tuple))
32
- coco_gt = copy.deepcopy(coco_gt)
33
- self.coco_gt = coco_gt
34
-
35
- self.iou_types = iou_types
36
- self.coco_eval = {}
37
- for iou_type in iou_types:
38
- self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
39
- self.coco_eval[iou_type].useCats = useCats
40
-
41
- self.img_ids = []
42
- self.eval_imgs = {k: [] for k in iou_types}
43
- self.useCats = useCats
44
-
45
- def update(self, predictions):
46
- img_ids = list(np.unique(list(predictions.keys())))
47
- self.img_ids.extend(img_ids)
48
-
49
- for iou_type in self.iou_types:
50
- results = self.prepare(predictions, iou_type)
51
-
52
- # suppress pycocotools prints
53
- with open(os.devnull, "w") as devnull:
54
- with contextlib.redirect_stdout(devnull):
55
- coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
56
-
57
- coco_eval = self.coco_eval[iou_type]
58
-
59
- coco_eval.cocoDt = coco_dt
60
- coco_eval.params.imgIds = list(img_ids)
61
- coco_eval.params.useCats = self.useCats
62
- img_ids, eval_imgs = evaluate(coco_eval)
63
-
64
- self.eval_imgs[iou_type].append(eval_imgs)
65
-
66
- def synchronize_between_processes(self):
67
- for iou_type in self.iou_types:
68
- self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
69
- create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
70
-
71
- def accumulate(self):
72
- for coco_eval in self.coco_eval.values():
73
- coco_eval.accumulate()
74
-
75
- def summarize(self):
76
- for iou_type, coco_eval in self.coco_eval.items():
77
- print("IoU metric: {}".format(iou_type))
78
- coco_eval.summarize()
79
-
80
- def prepare(self, predictions, iou_type):
81
- if iou_type == "bbox":
82
- return self.prepare_for_coco_detection(predictions)
83
- elif iou_type == "segm":
84
- return self.prepare_for_coco_segmentation(predictions)
85
- elif iou_type == "keypoints":
86
- return self.prepare_for_coco_keypoint(predictions)
87
- else:
88
- raise ValueError("Unknown iou type {}".format(iou_type))
89
-
90
- def prepare_for_coco_detection(self, predictions):
91
- coco_results = []
92
- for original_id, prediction in predictions.items():
93
- if len(prediction) == 0:
94
- continue
95
-
96
- boxes = prediction["boxes"]
97
- boxes = convert_to_xywh(boxes).tolist()
98
- scores = prediction["scores"].tolist()
99
- labels = prediction["labels"].tolist()
100
-
101
- coco_results.extend(
102
- [
103
- {
104
- "image_id": original_id,
105
- "category_id": labels[k],
106
- "bbox": box,
107
- "score": scores[k],
108
- }
109
- for k, box in enumerate(boxes)
110
- ]
111
- )
112
- return coco_results
113
-
114
- def prepare_for_coco_segmentation(self, predictions):
115
- coco_results = []
116
- for original_id, prediction in predictions.items():
117
- if len(prediction) == 0:
118
- continue
119
-
120
- scores = prediction["scores"]
121
- labels = prediction["labels"]
122
- masks = prediction["masks"]
123
-
124
- masks = masks > 0.5
125
-
126
- scores = prediction["scores"].tolist()
127
- labels = prediction["labels"].tolist()
128
-
129
- rles = [
130
- mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
131
- for mask in masks
132
- ]
133
- for rle in rles:
134
- rle["counts"] = rle["counts"].decode("utf-8")
135
-
136
- coco_results.extend(
137
- [
138
- {
139
- "image_id": original_id,
140
- "category_id": labels[k],
141
- "segmentation": rle,
142
- "score": scores[k],
143
- }
144
- for k, rle in enumerate(rles)
145
- ]
146
- )
147
- return coco_results
148
-
149
- def prepare_for_coco_keypoint(self, predictions):
150
- coco_results = []
151
- for original_id, prediction in predictions.items():
152
- if len(prediction) == 0:
153
- continue
154
-
155
- boxes = prediction["boxes"]
156
- boxes = convert_to_xywh(boxes).tolist()
157
- scores = prediction["scores"].tolist()
158
-             labels = prediction["labels"].tolist()
-             keypoints = prediction["keypoints"]
-             keypoints = keypoints.flatten(start_dim=1).tolist()
-
-             coco_results.extend(
-                 [
-                     {
-                         "image_id": original_id,
-                         "category_id": labels[k],
-                         "keypoints": keypoint,
-                         "score": scores[k],
-                     }
-                     for k, keypoint in enumerate(keypoints)
-                 ]
-             )
-         return coco_results
-
-
- def convert_to_xywh(boxes):
-     xmin, ymin, xmax, ymax = boxes.unbind(1)
-     return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
-
-
- def merge(img_ids, eval_imgs):
-     all_img_ids = all_gather(img_ids)
-     all_eval_imgs = all_gather(eval_imgs)
-
-     merged_img_ids = []
-     for p in all_img_ids:
-         merged_img_ids.extend(p)
-
-     merged_eval_imgs = []
-     for p in all_eval_imgs:
-         merged_eval_imgs.append(p)
-
-     merged_img_ids = np.array(merged_img_ids)
-     merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
-
-     # keep only unique (and in sorted order) images
-     merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
-     merged_eval_imgs = merged_eval_imgs[..., idx]
-
-     return merged_img_ids, merged_eval_imgs
-
-
- def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
-     img_ids, eval_imgs = merge(img_ids, eval_imgs)
-     img_ids = list(img_ids)
-     eval_imgs = list(eval_imgs.flatten())
-
-     coco_eval.evalImgs = eval_imgs
-     coco_eval.params.imgIds = img_ids
-     coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
-
-
- #################################################################
- # From pycocotools, just removed the prints and fixed
- # a Python3 bug about unicode not defined
- #################################################################
-
-
- def evaluate(self):
-     """
-     Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
-     :return: None
-     """
-     # tic = time.time()
-     # print('Running per image evaluation...')
-     p = self.params
-     # add backward compatibility if useSegm is specified in params
-     if p.useSegm is not None:
-         p.iouType = "segm" if p.useSegm == 1 else "bbox"
-         print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
-     # print('Evaluate annotation type *{}*'.format(p.iouType))
-     p.imgIds = list(np.unique(p.imgIds))
-     if p.useCats:
-         p.catIds = list(np.unique(p.catIds))
-     p.maxDets = sorted(p.maxDets)
-     self.params = p
-
-     self._prepare()
-     # loop through images, area range, max detection number
-     catIds = p.catIds if p.useCats else [-1]
-
-     if p.iouType == "segm" or p.iouType == "bbox":
-         computeIoU = self.computeIoU
-     elif p.iouType == "keypoints":
-         computeIoU = self.computeOks
-     self.ious = {
-         (imgId, catId): computeIoU(imgId, catId)
-         for imgId in p.imgIds
-         for catId in catIds}
-
-     evaluateImg = self.evaluateImg
-     maxDet = p.maxDets[-1]
-     evalImgs = [
-         evaluateImg(imgId, catId, areaRng, maxDet)
-         for catId in catIds
-         for areaRng in p.areaRng
-         for imgId in p.imgIds
-     ]
-     # this is NOT in the pycocotools code, but could be done outside
-     evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
-     self._paramsEval = copy.deepcopy(self.params)
-     # toc = time.time()
-     # print('DONE (t={:0.2f}s).'.format(toc-tic))
-     return p.imgIds, evalImgs
-
-
- #################################################################
- # end of straight copy from pycocotools, just removing the prints
- #################################################################
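
The helpers above exist to make pycocotools' `COCOeval` usable across distributed workers: each process runs the print-free `evaluate`, and `create_common_coco_eval` merges the gathered per-image results back into a single `COCOeval` before accumulation. A minimal single-process sketch of that wiring (the annotation and detection file paths are placeholders, and it relies on the file's own `all_gather` helper, which simply wraps its argument in a one-element list when a single process is running):

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("annotations/instances_val2017.json")  # hypothetical path
coco_dt = coco_gt.loadRes("detections.json")          # hypothetical path

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
# Use the patched, print-free evaluate() above instead of COCOeval.evaluate().
img_ids, eval_imgs = evaluate(coco_eval)

# Merge results across processes (a no-op with one process), then accumulate.
create_common_coco_eval(coco_eval, img_ids, eval_imgs)
coco_eval.accumulate()
coco_eval.summarize()
```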
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/lazyconfig_train_net.py DELETED
@@ -1,131 +0,0 @@
- #!/usr/bin/env python
- # Copyright (c) Facebook, Inc. and its affiliates.
- """
- Training script using the new "LazyConfig" python config files.
-
- This script reads a given python config file and runs the training or evaluation.
- It can be used to train any model or dataset as long as they can be
- instantiated by the recursive construction defined in the given config file.
-
- Besides lazy construction of models, dataloader, etc., this script expects a
- few common configuration parameters currently defined in "configs/common/train.py".
- To add more complicated training logic, you can easily add other configs
- in the config file and implement a new train_net.py to handle them.
- """
- import logging
-
- from detectron2.checkpoint import DetectionCheckpointer
- from detectron2.config import LazyConfig, instantiate
- from detectron2.engine import (
-     AMPTrainer,
-     SimpleTrainer,
-     default_argument_parser,
-     default_setup,
-     default_writers,
-     hooks,
-     launch,
- )
- from detectron2.engine.defaults import create_ddp_model
- from detectron2.evaluation import inference_on_dataset, print_csv_format
- from detectron2.utils import comm
-
- logger = logging.getLogger("detectron2")
-
-
- def do_test(cfg, model):
-     if "evaluator" in cfg.dataloader:
-         ret = inference_on_dataset(
-             model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
-         )
-         print_csv_format(ret)
-         return ret
-
-
- def do_train(args, cfg):
-     """
-     Args:
-         cfg: an object with the following attributes:
-             model: instantiate to a module
-             dataloader.{train,test}: instantiate to dataloaders
-             dataloader.evaluator: instantiate to evaluator for test set
-             optimizer: instantiate to an optimizer
-             lr_multiplier: instantiate to a fvcore scheduler
-             train: other misc config defined in `configs/common/train.py`, including:
-                 output_dir (str)
-                 init_checkpoint (str)
-                 amp.enabled (bool)
-                 max_iter (int)
-                 eval_period, log_period (int)
-                 device (str)
-                 checkpointer (dict)
-                 ddp (dict)
-     """
-     model = instantiate(cfg.model)
-     logger = logging.getLogger("detectron2")
-     logger.info("Model:\n{}".format(model))
-     model.to(cfg.train.device)
-
-     cfg.optimizer.params.model = model
-     optim = instantiate(cfg.optimizer)
-
-     train_loader = instantiate(cfg.dataloader.train)
-
-     model = create_ddp_model(model, **cfg.train.ddp)
-     trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim)
-     checkpointer = DetectionCheckpointer(
-         model,
-         cfg.train.output_dir,
-         trainer=trainer,
-     )
-     trainer.register_hooks(
-         [
-             hooks.IterationTimer(),
-             hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
-             hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
-             if comm.is_main_process()
-             else None,
-             hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
-             hooks.PeriodicWriter(
-                 default_writers(cfg.train.output_dir, cfg.train.max_iter),
-                 period=cfg.train.log_period,
-             )
-             if comm.is_main_process()
-             else None,
-         ]
-     )
-
-     checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
-     if args.resume and checkpointer.has_checkpoint():
-         # The checkpoint stores the training iteration that just finished, thus we start
-         # at the next iteration
-         start_iter = trainer.iter + 1
-     else:
-         start_iter = 0
-     trainer.train(start_iter, cfg.train.max_iter)
-
-
- def main(args):
-     cfg = LazyConfig.load(args.config_file)
-     cfg = LazyConfig.apply_overrides(cfg, args.opts)
-     default_setup(cfg, args)
-
-     if args.eval_only:
-         model = instantiate(cfg.model)
-         model.to(cfg.train.device)
-         model = create_ddp_model(model)
-         DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
-         print(do_test(cfg, model))
-     else:
-         do_train(args, cfg)
-
-
- if __name__ == "__main__":
-     args = default_argument_parser().parse_args()
-     launch(
-         main,
-         args.num_gpus,
-         num_machines=args.num_machines,
-         machine_rank=args.machine_rank,
-         dist_url=args.dist_url,
-         args=(args,),
-     )
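
The docstring above describes the shape such a config must have. A minimal sketch of a compatible LazyConfig file, built from detectron2's packaged `common` configs (the model/dataset choices and the output path are illustrative assumptions, not something this repository ships):

```python
# my_config.py -- hypothetical config, consumed as:
#   ./lazyconfig_train_net.py --config-file my_config.py --num-gpus 1
from detectron2.model_zoo import get_config

# Reuse detectron2's packaged common building blocks.
model = get_config("common/models/mask_rcnn_fpn.py").model
dataloader = get_config("common/data/coco.py").dataloader
optimizer = get_config("common/optim.py").SGD
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_1x
train = get_config("common/train.py").train

# Override the misc options that do_train() reads.
train.output_dir = "./output"  # hypothetical
train.max_iter = 90000
optimizer.lr = 0.02
```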
spaces/Benson/text-generation/Examples/Descargar Ganador Eleven 2020 Apk.md DELETED
@@ -1,67 +0,0 @@
-
- <h1>Download Winning Eleven 2020 APK for Android</h1>
- <p>If you are a fan of football games, you may have heard of Winning Eleven, one of the most popular and realistic football games for Android devices. Winning Eleven 2020 is the latest version of this game, and it offers many new features and improvements. In this article, we will tell you what Winning Eleven 2020 is, what its features are, how to download and install it on your Android device, and what its pros and cons are. We will also answer some frequently asked questions about this game.</p>
- <h2>download winning eleven 2020 apk</h2><br /><p><b><b>Download</b> &#10004; <a href="https://bltlly.com/2v6MoF">https://bltlly.com/2v6MoF</a></b></p><br /><br />
- <h2>What is Winning Eleven 2020?</h2>
- <p>Winning Eleven 2020, also known as WE 2020, is a football game developed by AndroKim. It is based on the Pro Evolution Soccer (PES) series, one of the most popular football games in the world. Winning Eleven 2020 is not an official game from Konami, the developer of PES, but a modified version that fans have altered to include the latest players, teams, kits, stadiums, and leagues. It also has some exclusive features that are not available in the original PES game.</p>
- <h3>Features of Winning Eleven 2020</h3>
- <p>Winning Eleven 2020 has many features that make it one of the best football games for Android devices. Here are some of them:</p>
- <h4>High-quality graphics and sound</h4>
- <p>The game has high-quality graphics and sound that make you feel as if you were watching a real football match. The players, stadiums, crowds, and animations are very realistic and detailed. The game also offers commentary in several languages, such as English, Spanish, French, and Arabic.</p>
- <h4>Realistic gameplay and physics</h4>
-
- <h4>Various modes and teams</h4>
- <p>The game has several modes and teams that give you plenty of ways to play. You can play in different modes, such as exhibition, league, cup, master league, online multiplayer, and training. You can also choose from more than 200 teams from different countries and leagues, such as England, Spain, Italy, Germany, France, Brazil, Argentina, Japan, Korea, China, and more. You can also create your own team and customize its name, logo, kit, players, and formation.</p>
- <h4>Customizable options and settings</h4>
- <p>The game has customizable options and settings that let you adjust the gameplay to your preferences. You can change the difficulty level, camera angle, control scheme, sound volume, language, and more. You can also edit the players' names, faces, hairstyles, accessories, boots, and stats.</p>
- <h3>How to download and install Winning Eleven 2020 APK?</h3>
- <p>If you want to download and install Winning Eleven 2020 APK on your Android device, you need to follow these steps:</p>
- <p></p>
- <h4>Step 1: Enable unknown sources</h4>
- <p>Since Winning Eleven 2020 APK is not available on the Google Play Store or any other official app store, you need to enable unknown sources on your device so that you can install apps from third-party sources. To do this, go to Settings > Security > Unknown sources and turn it on.</p>
- <h4>Step 2: Download the APK file</h4>
- <p>Next, you need to download the Winning Eleven 2020 APK file from a reliable source. You can use this link to download the APK file, which is about 150 MB in size. Make sure you have enough storage space on your device before downloading the file.</p>
- <h4>Step 3: Install the APK file</h4>
-
- <h4>Step 4: Launch the game and enjoy</h4>
- <p>Once the installation is done, you can launch the game and enjoy playing Winning Eleven 2020 on your Android device. You will see the game icon on your home screen or in the app drawer. Tap it and start playing. You can also sign in with your Google Play Games account to save your progress and achievements.</p>
- <h3>Pros and cons of Winning Eleven 2020 APK</h3>
- <p>Winning Eleven 2020 APK has many pros and cons that you should weigh before downloading and playing it. Here are some of them:</p>
- <h4>Pros</h4>
- <ul>
- <li>The game is free to download and play, unlike the official PES game, which requires a subscription or in-app purchases.</li>
- <li>The game has many features and options that make it more fun and realistic than the original PES game.</li>
- <li>The game has updated players, teams, kits, stadiums, and leagues that reflect the current football season.</li>
- <li>The game has an online multiplayer mode that lets you play with or against other players from around the world.</li>
- </ul>
- <h4>Cons</h4>
- <ul>
- <li>The game is not an official Konami game, so it may have bugs, glitches, or errors that affect its performance or compatibility.</li>
- <li>The game may not work on some devices or Android versions, especially older ones.</li>
- <li>The game may require a lot of storage space and internet data to download and play.</li>
- <li>The game may show ads or pop-ups that can interrupt your gameplay or experience.</li>
- </ul>
- <h2>Conclusion</h2>
-
- <h3>Frequently asked questions</h3>
- <p>Here are some frequently asked questions about Winning Eleven 2020 APK:</p>
- <ol>
- <li><b>Is Winning Eleven 2020 APK safe to download and install?</b></li>
- <p>Yes, Winning Eleven 2020 APK is safe to download and install as long as you use a reliable source such as the one we provide in this article. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses or malware that can damage your device or data.</p>
- <li><b>Is Winning Eleven 2020 APK legal to download and play?</b></li>
- <p>Yes, Winning Eleven 2020 APK is legal to download and play as long as you do not use it for illegal or unethical purposes. However, you should respect the intellectual property rights of Konami, the developer of PES, and not claim or distribute Winning Eleven 2020 APK as your own work.</p>
- <li><b>How can I update Winning Eleven 2020 APK?</b></li>
- <p>To update Winning Eleven 2020 APK, you need to download and install the latest version of the APK file from a reliable source such as the one we provide in this article. You should also delete the previous version of the APK file from your device before installing the new one.</p>
- <li><b>How can I fix Winning Eleven 2020 APK errors or problems?</b></li>
- <p>If you run into any errors or problems while downloading, installing, or playing Winning Eleven 2020 APK, you can try some of these solutions:</p>
- <ul>
- <li>Check your internet connection and make sure it is stable and fast.</li>
- <li>Clear the game's cache and data from Settings > Apps > Winning Eleven 2020 > Storage > Clear cache/data.</li> <li>Restart your device and try launching the game again.</li>
- <li>Uninstall and reinstall the game from a reliable source such as the one we provide in this article.</li>
- <li>Contact the developer of Winning Eleven 2020 APK through their website or social media channels and report the problem.</li>
- </ul>
-
- <p>Yes, you can play Winning Eleven 2020 APK without an internet connection. However, you will not be able to access some features, such as the online multiplayer mode, updates, or ads. You will also need an internet connection to download and install the game initially.</p>
- </ol></p>
- <br />
- <br />
spaces/Bonp/B/Dockerfile DELETED
@@ -1,21 +0,0 @@
- FROM node:18-bullseye-slim
-
- RUN apt-get update && \
-     apt-get install -y git
-
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
- WORKDIR /app
-
- RUN npm install
-
- COPY Dockerfile greeting.md* .env* ./
-
- RUN npm run build
-
- EXPOSE 7860
-
- ENV NODE_ENV=production
-
- CMD [ "npm", "start" ]
spaces/BrunoHempel775/Byzu/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Byzu
- emoji: 🦀
- colorFrom: indigo
- colorTo: gray
- sdk: gradio
- sdk_version: 3.12.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/detection_checkpoint.py DELETED
@@ -1,59 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import pickle
- from fvcore.common.checkpoint import Checkpointer
- from fvcore.common.file_io import PathManager
-
- import detectron2.utils.comm as comm
-
- from .c2_model_loading import align_and_update_state_dicts
-
-
- class DetectionCheckpointer(Checkpointer):
-     """
-     Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2
-     model zoo, and apply conversions for legacy models.
-     """
-
-     def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
-         is_main_process = comm.is_main_process()
-         super().__init__(
-             model,
-             save_dir,
-             save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
-             **checkpointables,
-         )
-
-     def _load_file(self, filename):
-         if filename.endswith(".pkl"):
-             with PathManager.open(filename, "rb") as f:
-                 data = pickle.load(f, encoding="latin1")
-             if "model" in data and "__author__" in data:
-                 # file is in Detectron2 model zoo format
-                 self.logger.info("Reading a file from '{}'".format(data["__author__"]))
-                 return data
-             else:
-                 # assume file is from Caffe2 / Detectron1 model zoo
-                 if "blobs" in data:
-                     # Detection models have "blobs", but ImageNet models don't
-                     data = data["blobs"]
-                 data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
-                 return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
-
-         loaded = super()._load_file(filename)  # load native pth checkpoint
-         if "model" not in loaded:
-             loaded = {"model": loaded}
-         return loaded
-
-     def _load_model(self, checkpoint):
-         if checkpoint.get("matching_heuristics", False):
-             self._convert_ndarray_to_tensor(checkpoint["model"])
-             # convert weights by name-matching heuristics
-             model_state_dict = self.model.state_dict()
-             align_and_update_state_dicts(
-                 model_state_dict,
-                 checkpoint["model"],
-                 c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
-             )
-             checkpoint["model"] = model_state_dict
-         # for non-caffe2 models, use standard ways to load it
-         super()._load_model(checkpoint)
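
For reference, a short usage sketch of the class above (the bare default model and the checkpoint file name are illustrative assumptions):

```python
from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg = get_cfg()
model = build_model(cfg)  # a bare default model, for illustration only

# _load_file() above routes ".pkl" model zoo files through the pickle branch
# and enables name-matching heuristics for Caffe2/Detectron1-era weights.
checkpointer = DetectionCheckpointer(model, save_dir="./output")
checkpointer.resume_or_load("model_final.pkl", resume=False)  # hypothetical file
```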
spaces/CVPR/LIVE/thrust/thrust/async/for_each.h DELETED
@@ -1,119 +0,0 @@
- /*
-  *  Copyright 2008-2018 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file async/for_each.h
-  *  \brief Functions for asynchronously iterating over the elements of a range.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/cpp14_required.h>
-
- #if THRUST_CPP_DIALECT >= 2014
-
- #include <thrust/detail/static_assert.h>
- #include <thrust/detail/select_system.h>
- #include <thrust/type_traits/remove_cvref.h>
- #include <thrust/system/detail/adl/async/for_each.h>
-
- #include <thrust/event.h>
-
- namespace thrust
- {
-
- namespace async
- {
-
- namespace unimplemented
- {
-
- template <
-   typename DerivedPolicy
- , typename ForwardIt, typename Sentinel, typename UnaryFunction
- >
- __host__
- event<DerivedPolicy>
- async_for_each(
-   thrust::execution_policy<DerivedPolicy>&, ForwardIt, Sentinel, UnaryFunction
- )
- {
-   THRUST_STATIC_ASSERT_MSG(
-     (thrust::detail::depend_on_instantiation<ForwardIt, false>::value)
-   , "this algorithm is not implemented for the specified system"
-   );
-   return {};
- }
-
- } // namespace unimplemented
-
- namespace for_each_detail
- {
-
- using thrust::async::unimplemented::async_for_each;
-
- struct for_each_fn final
- {
-   template <
-     typename DerivedPolicy
-   , typename ForwardIt, typename Sentinel, typename UnaryFunction
-   >
-   __host__
-   static auto call(
-     thrust::detail::execution_policy_base<DerivedPolicy> const& exec
-   , ForwardIt&& first, Sentinel&& last
-   , UnaryFunction&& f
-   )
-   // ADL dispatch.
-   THRUST_RETURNS(
-     async_for_each(
-       thrust::detail::derived_cast(thrust::detail::strip_const(exec))
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(f)
-     )
-   )
-
-   template <typename ForwardIt, typename Sentinel, typename UnaryFunction>
-   __host__
-   static auto call(ForwardIt&& first, Sentinel&& last, UnaryFunction&& f)
-   THRUST_RETURNS(
-     for_each_fn::call(
-       thrust::detail::select_system(
-         typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
-       )
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(f)
-     )
-   )
-
-   template <typename... Args>
-   THRUST_NODISCARD __host__
-   auto operator()(Args&&... args) const
-   THRUST_RETURNS(
-     call(THRUST_FWD(args)...)
-   )
- };
-
- } // namespace for_each_detail
-
- THRUST_INLINE_CONSTANT for_each_detail::for_each_fn for_each{};
-
- } // namespace async
-
- } // end namespace thrust
-
- #endif
-
spaces/CVPR/LIVE/thrust/thrust/detail/temporary_array.h DELETED
@@ -1,181 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- /*! \file temporary_array.h
-  *  \brief Container-like class temporary storage inside algorithms.
-  */
-
- #pragma once
-
- namespace thrust
- {
- namespace detail
- {
-
- // Forward declare temporary_array, as it's used by the CUDA copy backend, which
- // is included in contiguous_storage's definition.
- template<typename T, typename System>
- class temporary_array;
-
- } // end detail
- } // end thrust
-
- #include <thrust/detail/config.h>
- #include <thrust/iterator/iterator_traits.h>
- #include <thrust/iterator/detail/tagged_iterator.h>
- #include <thrust/detail/contiguous_storage.h>
- #include <thrust/detail/allocator/temporary_allocator.h>
- #include <thrust/detail/allocator/no_throw_allocator.h>
- #include <thrust/detail/memory_wrapper.h>
-
- namespace thrust
- {
- namespace detail
- {
-
-
- template<typename T, typename System>
- class temporary_array
-   : public contiguous_storage<
-       T,
-       no_throw_allocator<
-         temporary_allocator<T,System>
-       >
-     >
- {
-   private:
-     typedef contiguous_storage<
-       T,
-       no_throw_allocator<
-         temporary_allocator<T,System>
-       >
-     > super_t;
-
-     // to help out the constructor
-     typedef no_throw_allocator<temporary_allocator<T,System> > alloc_type;
-
-   public:
-     typedef typename super_t::size_type size_type;
-
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system);
-
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system, size_type n);
-
-     // provide a kill-switch to explicitly avoid initialization
-     __host__ __device__
-     temporary_array(int uninit, thrust::execution_policy<System> &system, size_type n);
-
-     template<typename InputIterator>
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system,
-                     InputIterator first,
-                     size_type n);
-
-     template<typename InputIterator, typename InputSystem>
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system,
-                     thrust::execution_policy<InputSystem> &input_system,
-                     InputIterator first,
-                     size_type n);
-
-     template<typename InputIterator>
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system,
-                     InputIterator first,
-                     InputIterator last);
-
-     template<typename InputSystem, typename InputIterator>
-     __host__ __device__
-     temporary_array(thrust::execution_policy<System> &system,
-                     thrust::execution_policy<InputSystem> &input_system,
-                     InputIterator first,
-                     InputIterator last);
-
-     __host__ __device__
-     ~temporary_array();
- }; // end temporary_array
-
-
- // XXX eliminate this when we do ranges for real
- template<typename Iterator, typename System>
- class tagged_iterator_range
- {
-   public:
-     typedef thrust::detail::tagged_iterator<Iterator,System> iterator;
-
-     template<typename Ignored1, typename Ignored2>
-     tagged_iterator_range(const Ignored1 &, const Ignored2 &, Iterator first, Iterator last)
-       : m_begin(first),
-         m_end(last)
-     {}
-
-     iterator begin(void) const { return m_begin; }
-     iterator end(void) const { return m_end; }
-
-   private:
-     iterator m_begin, m_end;
- };
-
-
- // if FromSystem is convertible to ToSystem, then just make a shallow
- // copy of the range. else, use a temporary_array
- // note that the resulting iterator is explicitly tagged with ToSystem either way
- template<typename Iterator, typename FromSystem, typename ToSystem>
- struct move_to_system_base
-   : public eval_if<
-       is_convertible<
-         FromSystem,
-         ToSystem
-       >::value,
-       identity_<
-         tagged_iterator_range<Iterator,ToSystem>
-       >,
-       identity_<
-         temporary_array<
-           typename thrust::iterator_value<Iterator>::type,
-           ToSystem
-         >
-       >
-     >
- {};
-
-
- template<typename Iterator, typename FromSystem, typename ToSystem>
- class move_to_system
-   : public move_to_system_base<
-       Iterator,
-       FromSystem,
-       ToSystem
-     >::type
- {
-   typedef typename move_to_system_base<Iterator,FromSystem,ToSystem>::type super_t;
-
-   public:
-     move_to_system(thrust::execution_policy<FromSystem> &from_system,
-                    thrust::execution_policy<ToSystem> &to_system,
-                    Iterator first,
-                    Iterator last)
-       : super_t(to_system, from_system, first, last) {}
- };
-
-
- } // end detail
- } // end thrust
-
- #include <thrust/detail/temporary_array.inl>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/per_device_resource.h DELETED
@@ -1,47 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/execution_policy.h>
- #include <thrust/system/detail/generic/tag.h>
- #include <thrust/mr/memory_resource.h>
- #include <thrust/detail/execution_policy.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace detail
- {
- namespace generic
- {
-
-
- template<typename MR, typename DerivedPolicy>
- __host__
- MR * get_per_device_resource(thrust::detail::execution_policy_base<DerivedPolicy>&)
- {
-     return mr::get_global_resource<MR>();
- }
-
-
- } // end generic
- } // end detail
- } // end system
- } // end thrust
-
spaces/CVPR/lama-example/bin/filter_sharded_dataset.py DELETED
@@ -1,69 +0,0 @@
- #!/usr/bin/env python3
-
-
- import math
- import os
- import random
-
- import braceexpand
- import webdataset as wds
-
- DEFAULT_CATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'configs', 'places2-categories_157.txt')
-
- def is_good_key(key, cats):
-     return any(c in key for c in cats)
-
-
- def main(args):
-     if args.categories == 'nofilter':
-         good_categories = None
-     else:
-         with open(args.categories, 'r') as f:
-             good_categories = set(line.strip().split(' ')[0] for line in f if line.strip())
-
-     all_input_files = list(braceexpand.braceexpand(args.infile))
-     chunk_size = int(math.ceil(len(all_input_files) / args.n_read_streams))
-
-     input_iterators = [iter(wds.Dataset(all_input_files[start : start + chunk_size]).shuffle(args.shuffle_buffer))
-                        for start in range(0, len(all_input_files), chunk_size)]
-     output_datasets = [wds.ShardWriter(args.outpattern.format(i)) for i in range(args.n_write_streams)]
-
-     good_readers = list(range(len(input_iterators)))
-     step_i = 0
-     good_samples = 0
-     bad_samples = 0
-     while len(good_readers) > 0:
-         if step_i % args.print_freq == 0:
-             print(f'Iterations done {step_i}; readers alive {good_readers}; good samples {good_samples}; bad samples {bad_samples}')
-
-         step_i += 1
-
-         ri = random.choice(good_readers)
-         try:
-             sample = next(input_iterators[ri])
-         except StopIteration:
-             good_readers = list(set(good_readers) - {ri})
-             continue
-
-         if good_categories is not None and not is_good_key(sample['__key__'], good_categories):
-             bad_samples += 1
-             continue
-
-         wi = random.randint(0, args.n_write_streams - 1)
-         output_datasets[wi].write(sample)
-         good_samples += 1
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('--categories', type=str, default=DEFAULT_CATS_FILE)
-     aparser.add_argument('--shuffle-buffer', type=int, default=10000)
-     aparser.add_argument('--n-read-streams', type=int, default=10)
-     aparser.add_argument('--n-write-streams', type=int, default=10)
-     aparser.add_argument('--print-freq', type=int, default=1000)
-     aparser.add_argument('infile', type=str)
-     aparser.add_argument('outpattern', type=str)
-
-     main(aparser.parse_args())
spaces/CVPR/regionclip-demo/detectron2/config/compat.py DELETED
@@ -1,229 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- """
- Backward compatibility of configs.
-
- Instructions to bump version:
- + It's not needed to bump version if new keys are added.
-   It's only needed when backward-incompatible changes happen
-   (i.e., some existing keys disappear, or the meaning of a key changes)
- + To bump version, do the following:
-     1. Increment _C.VERSION in defaults.py
-     2. Add a converter in this file.
-
-        Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
-        and a function "downgrade" which in-place downgrades config from X to X-1
-
-        In each function, VERSION is left unchanged.
-
-        Each converter assumes that its input has the relevant keys
-        (i.e., the input is not a partial config).
-     3. Run the tests (test_config.py) to make sure the upgrade & downgrade
-        functions are consistent.
- """
-
- import logging
- from typing import List, Optional, Tuple
-
- from .config import CfgNode as CN
- from .defaults import _C
-
- __all__ = ["upgrade_config", "downgrade_config"]
-
-
- def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
-     """
-     Upgrade a config from its current version to a newer version.
-
-     Args:
-         cfg (CfgNode):
-         to_version (int): defaults to the latest version.
-     """
-     cfg = cfg.clone()
-     if to_version is None:
-         to_version = _C.VERSION
-
-     assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
-         cfg.VERSION, to_version
-     )
-     for k in range(cfg.VERSION, to_version):
-         converter = globals()["ConverterV" + str(k + 1)]
-         converter.upgrade(cfg)
-         cfg.VERSION = k + 1
-     return cfg
-
-
- def downgrade_config(cfg: CN, to_version: int) -> CN:
-     """
-     Downgrade a config from its current version to an older version.
-
-     Args:
-         cfg (CfgNode):
-         to_version (int):
-
-     Note:
-         A general downgrade of arbitrary configs is not always possible due to the
-         different functionalities in different versions.
-         The purpose of downgrade is only to recover the defaults in old versions,
-         allowing it to load an old partial yaml config.
-         Therefore, the implementation only needs to fill in the default values
-         in the old version when a general downgrade is not possible.
-     """
-     cfg = cfg.clone()
-     assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
-         cfg.VERSION, to_version
-     )
-     for k in range(cfg.VERSION, to_version, -1):
-         converter = globals()["ConverterV" + str(k)]
-         converter.downgrade(cfg)
-         cfg.VERSION = k - 1
-     return cfg
-
-
- def guess_version(cfg: CN, filename: str) -> int:
-     """
-     Guess the version of a partial config where the VERSION field is not specified.
-     Returns the version, or the latest if cannot make a guess.
-
-     This makes it easier for users to migrate.
-     """
-     logger = logging.getLogger(__name__)
-
-     def _has(name: str) -> bool:
-         cur = cfg
-         for n in name.split("."):
-             if n not in cur:
-                 return False
-             cur = cur[n]
-         return True
-
-     # Most users' partial configs have "MODEL.WEIGHT", so guess on it
-     ret = None
-     if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
-         ret = 1
-
-     if ret is not None:
-         logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
-     else:
-         ret = _C.VERSION
-         logger.warning(
-             "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
-                 filename, ret
-             )
-         )
-     return ret
-
-
- def _rename(cfg: CN, old: str, new: str) -> None:
-     old_keys = old.split(".")
-     new_keys = new.split(".")
-
-     def _set(key_seq: List[str], val: str) -> None:
-         cur = cfg
-         for k in key_seq[:-1]:
-             if k not in cur:
-                 cur[k] = CN()
-             cur = cur[k]
-         cur[key_seq[-1]] = val
-
-     def _get(key_seq: List[str]) -> CN:
-         cur = cfg
-         for k in key_seq:
-             cur = cur[k]
-         return cur
-
-     def _del(key_seq: List[str]) -> None:
-         cur = cfg
-         for k in key_seq[:-1]:
-             cur = cur[k]
-         del cur[key_seq[-1]]
-         if len(cur) == 0 and len(key_seq) > 1:
-             _del(key_seq[:-1])
-
-     _set(new_keys, _get(old_keys))
-     _del(old_keys)
-
-
- class _RenameConverter:
-     """
-     A converter that handles simple rename.
-     """
-
-     RENAME: List[Tuple[str, str]] = []  # list of tuples of (old name, new name)
-
-     @classmethod
-     def upgrade(cls, cfg: CN) -> None:
-         for old, new in cls.RENAME:
-             _rename(cfg, old, new)
-
-     @classmethod
-     def downgrade(cls, cfg: CN) -> None:
-         for old, new in cls.RENAME[::-1]:
-             _rename(cfg, new, old)
-
-
- class ConverterV1(_RenameConverter):
-     RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
-
-
- class ConverterV2(_RenameConverter):
-     """
-     A large bulk of rename, before public release.
-     """
-
-     RENAME = [
-         ("MODEL.WEIGHT", "MODEL.WEIGHTS"),
-         ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
-         ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
-         ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
-         ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
-         (
-             "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
-             "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
-         ),
-         (
-             "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
-             "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
-         ),
-         (
-             "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
-             "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
-         ),
-         ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
-         ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
-         ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
-         ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
-         ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
-         ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
-         ("TEST.AUG_ON", "TEST.AUG.ENABLED"),
-         ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
-         ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
-         ("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
-     ]
-
-     @classmethod
-     def upgrade(cls, cfg: CN) -> None:
-         super().upgrade(cfg)
-
-         if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
-             _rename(
-                 cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
-             )
-             _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
-             del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
-             del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
-         else:
-             _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
-             _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
-             del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
-             del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
-         del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
-
-     @classmethod
-     def downgrade(cls, cfg: CN) -> None:
-         super().downgrade(cfg)
-
-         _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
-         _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
-         cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
-         cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
-         cfg.MODEL.RETINANET.ANCHOR_STRIDES = []  # this is not used anywhere in any version
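
Because converters are looked up by name via `globals()["ConverterV" + str(k)]`, a future schema change only needs a new subclass of the `_RenameConverter` base above. A sketch of what a hypothetical next version bump could look like (the renamed keys are invented for illustration, not real detectron2 keys):

```python
class ConverterV3(_RenameConverter):
    # Hypothetical next bump; picked up automatically by
    # upgrade_config/downgrade_config once _C.VERSION is incremented.
    RENAME = [
        ("MODEL.OLD_KEY", "MODEL.NEW_KEY"),  # invented example keys
    ]
```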
spaces/CVPR/v-doc_abstractive_mac/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: VDoc-mac
- emoji: 🙆‍♂️
- colorFrom: red
- colorTo: blue
- sdk: gradio
- sdk_version: 3.0.15
- python_version: 3.7.11
- app_file: interface.py
- pinned: false
- ---
spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/wiki_process.py DELETED
@@ -1,46 +0,0 @@
- #!/usr/bin/env python
- # -*- coding:utf-8 _*-
- """
- @author:quincy qiang
- @license: Apache Licence
- @file: wiki_process.py
- @time: 2023/04/19
- @contact: [email protected]
- @software: PyCharm
- @description: https://blog.csdn.net/weixin_40871455/article/details/88822290
- """
- import logging
- import sys
- from gensim.corpora import WikiCorpus
-
- logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
- '''
- extract data from wiki dumps(*articles.xml.bz2) by gensim.
- @2019-3-26
- '''
-
-
- def help():
-     print("Usage: python wikipro.py zhwiki-20190320-pages-articles-multistream.xml.bz2 wiki.zh.txt")
-
-
- if __name__ == '__main__':
-     if len(sys.argv) < 3:
-         help()
-         sys.exit(1)
-     logging.info("running %s" % ' '.join(sys.argv))
-     inp, outp = sys.argv[1:3]
-     i = 0
-
-     output = open(outp, 'w', encoding='utf8')
-     wiki = WikiCorpus(inp, dictionary={})
-     for text in wiki.get_texts():
-         output.write(" ".join(text) + "\n")
-         i = i + 1
-         if i % 10000 == 0:
-             logging.info("Save " + str(i) + " articles")
-     output.close()
-     logging.info("Finished saved " + str(i) + " articles")
-
-     # Run from the command line:
-     # python wikipro.py cache/zh_wikipedia/zhwiki-latest-pages-articles.xml.bz2 wiki.zh.txt
spaces/Choisuren/AnimeGANv3/app.py DELETED
@@ -1,67 +0,0 @@
- import os
- import cv2
- import gradio as gr
- import AnimeGANv3_src
-
-
- os.makedirs('output', exist_ok=True)
-
-
- def inference(img_path, Style, if_face=None):
-     print(img_path, Style, if_face)
-     try:
-         img = cv2.imread(img_path)
-         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-         if Style == "AnimeGANv3_Arcane":
-             f = "A"
-         elif Style == "AnimeGANv3_Trump v1.0":
-             f = "T"
-         elif Style == "AnimeGANv3_Shinkai":
-             f = "S"
-         elif Style == "AnimeGANv3_PortraitSketch":
-             f = "P"
-         elif Style == "AnimeGANv3_Hayao":
-             f = "H"
-         elif Style == "AnimeGANv3_Disney v1.0":
-             f = "D"
-         elif Style == "AnimeGANv3_JP_face v1.0":
-             f = "J"
-         else:
-             f = "U"
-
-         try:
-             det_face = True if if_face == "Yes" else False
-             output = AnimeGANv3_src.Convert(img, f, det_face)
-             save_path = f"output/out.{img_path.rsplit('.')[-1]}"
-             cv2.imwrite(save_path, output[:, :, ::-1])
-             return output, save_path
-         except RuntimeError as error:
-             print('Error', error)
-     except Exception as error:
-         print('global exception', error)
-     return None, None
-
-
- gr.Interface(
-     inference, [
-         gr.inputs.Image(type="filepath", label="Input"),
-         gr.Dropdown([
-             'AnimeGANv3_Hayao',
-             'AnimeGANv3_Shinkai',
-             'AnimeGANv3_Arcane',
-             'AnimeGANv3_USA',
-             'AnimeGANv3_Trump v1.0',
-             'AnimeGANv3_Disney v1.0',
-             'AnimeGANv3_PortraitSketch',
-             'AnimeGANv3_JP_face v1.0',
-         ],
-             type="value",
-             value='AnimeGANv3_Hayao',
-             label='AnimeGANv3 Style'),
-         gr.inputs.Radio(['Yes', 'No'], type="value", default='No', label='Extract face'),
-     ], [
-         gr.outputs.Image(type="numpy", label="Output (The whole image)"),
-         gr.outputs.File(label="Download the output image")
-     ],
-     allow_flagging="never").launch(enable_queue=True)
spaces/ClassCat/Medical-Image-Classification-with-MONAI/app.py DELETED
@@ -1,68 +0,0 @@
-
-
- import torch
- from monai.networks.nets import DenseNet121
-
- import gradio as gr
-
- #from PIL import Image
-
- model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=6)
- model.load_state_dict(torch.load('weights/mednist_model.pth', map_location=torch.device('cpu')))
-
- from monai.transforms import (
-     EnsureChannelFirst,
-     Compose,
-     LoadImage,
-     ScaleIntensity,
- )
-
- test_transforms = Compose(
-     [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()]
- )
-
- class_names = [
-     'AbdomenCT', 'BreastMRI', 'CXR', 'ChestCT', 'Hand', 'HeadCT'
- ]
-
- import os, glob
-
- #examples_dir = './samples'
- #example_files = glob.glob(os.path.join(examples_dir, '*.jpg'))
-
- def classify_image(image_filepath):
-     input = test_transforms(image_filepath)
-
-     model.eval()
-     with torch.no_grad():
-         pred = model(input.unsqueeze(dim=0))
-
-     prob = torch.nn.functional.softmax(pred[0], dim=0)
-
-     confidences = {class_names[i]: float(prob[i]) for i in range(6)}
-     print(confidences)
-
-     return confidences
-
-
- with gr.Blocks(title="Medical Image Classification with MONAI - ClassCat",
-                css=".gradio-container {background:mintcream;}"
-                ) as demo:
-     gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">Medical Image Classification with MONAI</div>""")
-
-     with gr.Row():
-         input_image = gr.Image(type="filepath", image_mode="L", shape=(64, 64))
-         output_label = gr.Label(label="Probabilities", num_top_classes=3)
-
-     send_btn = gr.Button("Infer")
-     send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)
-
-     with gr.Row():
-         gr.Examples(['./samples/mednist_AbdomenCT00.png'], label='Sample images : AbdomenCT', inputs=input_image)
-         gr.Examples(['./samples/mednist_CXR02.png'], label='CXR', inputs=input_image)
-         gr.Examples(['./samples/mednist_ChestCT08.png'], label='ChestCT', inputs=input_image)
-         gr.Examples(['./samples/mednist_Hand01.png'], label='Hand', inputs=input_image)
-         gr.Examples(['./samples/mednist_HeadCT07.png'], label='HeadCT', inputs=input_image)
-
- #demo.queue(concurrency_count=3)
- demo.launch(debug=True)
spaces/CofAI/chat.b4/client/css/checkbox.css DELETED
@@ -1,59 +0,0 @@
- .checkbox input {
-     height: 0;
-     width: 0;
-     display: none;
- }
-
- .checkbox span {
-     font-size: 0.875rem;
-     color: var(--colour-3);
-     margin-left: 4px;
- }
-
- .checkbox label:after {
-     content: "";
-     position: absolute;
-     top: 50%;
-     transform: translateY(-50%);
-     left: 5px;
-     width: 20px;
-     height: 20px;
-     background: var(--blur-border);
-     border-radius: 90px;
-     transition: 0.33s;
- }
-
- .checkbox input + label:after,
- .checkbox input:checked + label {
-     background: var(--colour-3);
- }
-
- .checkbox input + label,
- .checkbox input:checked + label:after {
-     background: var(--blur-border);
- }
-
- .checkbox input:checked + label:after {
-     left: calc(100% - 5px - 20px);
- }
-
- @media screen and (max-width: 990px) {
-     .checkbox span {
-         font-size: 0.75rem;
-     }
-
-     .checkbox label {
-         width: 25px;
-         height: 15px;
-     }
-
-     .checkbox label:after {
-         left: 2px;
-         width: 10px;
-         height: 10px;
-     }
-
-     .checkbox input:checked + label:after {
-         left: calc(100% - 2px - 10px);
-     }
- }
spaces/Cong723/gpt-academic-public/crazy_functions/解析JupyterNotebook.py DELETED
@@ -1,145 +0,0 @@
- from toolbox import update_ui
- from toolbox import CatchException, report_execption, write_results_to_file
- fast_debug = True
-
-
- class PaperFileGroup():
-     def __init__(self):
-         self.file_paths = []
-         self.file_contents = []
-         self.sp_file_contents = []
-         self.sp_file_index = []
-         self.sp_file_tag = []
-
-         # count_token
-         from request_llm.bridge_all import model_info
-         enc = model_info["gpt-3.5-turbo"]['tokenizer']
-         def get_token_num(txt): return len(
-             enc.encode(txt, disallowed_special=()))
-         self.get_token_num = get_token_num
-
-     def run_file_split(self, max_token_limit=1900):
-         """
-         Split overly long text into pieces
-         """
-         for index, file_content in enumerate(self.file_contents):
-             if self.get_token_num(file_content) < max_token_limit:
-                 self.sp_file_contents.append(file_content)
-                 self.sp_file_index.append(index)
-                 self.sp_file_tag.append(self.file_paths[index])
-             else:
-                 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                 segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-                     file_content, self.get_token_num, max_token_limit)
-                 for j, segment in enumerate(segments):
-                     self.sp_file_contents.append(segment)
-                     self.sp_file_index.append(index)
-                     self.sp_file_tag.append(
-                         self.file_paths[index] + f".part-{j}.txt")
-
-
- def parseNotebook(filename, enable_markdown=1):
-     import json
-
-     CodeBlocks = []
-     with open(filename, 'r', encoding='utf-8', errors='replace') as f:
-         notebook = json.load(f)
-     for cell in notebook['cells']:
-         if cell['cell_type'] == 'code' and cell['source']:
-             # remove blank lines
-             cell['source'] = [line for line in cell['source'] if line.strip()
-                               != '']
-             CodeBlocks.append("".join(cell['source']))
-         elif enable_markdown and cell['cell_type'] == 'markdown' and cell['source']:
-             cell['source'] = [line for line in cell['source'] if line.strip()
-                               != '']
-             CodeBlocks.append("Markdown:" + "".join(cell['source']))
-
-     Code = ""
-     for idx, code in enumerate(CodeBlocks):
-         Code += f"This is {idx+1}th code block: \n"
-         Code += code + "\n"
-
-     return Code
-
-
- def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-     enable_markdown = plugin_kwargs.get("advanced_arg", "1")
-     try:
-         enable_markdown = int(enable_markdown)
-     except ValueError:
-         enable_markdown = 1
-
-     pfg = PaperFileGroup()
-
-     for fp in file_manifest:
-         file_content = parseNotebook(fp, enable_markdown=enable_markdown)
-         pfg.file_paths.append(fp)
-         pfg.file_contents.append(file_content)
-
-     # <-------- split overly long ipynb files ---------->
-     pfg.run_file_split(max_token_limit=1024)
-     n_split = len(pfg.sp_file_contents)
-
-     inputs_array = [r"This is a Jupyter Notebook file, tell me about Each Block in Chinese. Focus Just On Code." +
-                     r"If a block starts with `Markdown` which means it's a markdown block in ipynb. " +
-                     r"Start a new line for a block and block num use Chinese." +
-                     f"\n\n{frag}" for frag in pfg.sp_file_contents]
-     inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag]
-     sys_prompt_array = ["You are a professional programmer."] * n_split
-
-     gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
-         inputs_array=inputs_array,
-         inputs_show_user_array=inputs_show_user_array,
-         llm_kwargs=llm_kwargs,
-         chatbot=chatbot,
-         history_array=[[""] for _ in range(n_split)],
-         sys_prompt_array=sys_prompt_array,
-         # max_workers=5,  # the maximum parallelism OpenAI allows
-         scroller_max_len=80
-     )
-
-     # <-------- collect the results and exit ---------->
-     block_result = " \n".join(gpt_response_collection)
-     chatbot.append(("解析的结果如下", block_result))
-     history.extend(["解析的结果如下", block_result])
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     # <-------- write results to file and exit ---------->
-     res = write_results_to_file(history)
-     chatbot.append(("完成了吗?", res))
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-
- @CatchException
- def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     chatbot.append([
-         "函数插件功能?",
-         "对IPynb文件进行解析。Contributor: codycjy."])
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     history = []  # clear history
-     import glob
-     import os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "":
-             txt = '空空如也的输入栏'
-         report_execption(chatbot, history,
-                          a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     if txt.endswith('.ipynb'):
-         file_manifest = [txt]
-     else:
-         file_manifest = [f for f in glob.glob(
-             f'{project_folder}/**/*.ipynb', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history,
-                          a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, )
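
A quick sketch of how `parseNotebook` above can be exercised on its own, outside the plugin flow (the notebook path is an assumption, and it presumes the module's own imports, such as `toolbox`, resolve):

```python
# Prints each code cell prefixed "This is Nth code block:" and, when
# enable_markdown is truthy, markdown cells prefixed "Markdown:".
if __name__ == "__main__":
    print(parseNotebook("demo.ipynb", enable_markdown=1))  # hypothetical file
```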
spaces/Cong723/gpt-academic-public/docs/README_JP.md DELETED
@@ -1,302 +0,0 @@
1
- > **Note**
2
- >
3
- > このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
4
- >
5
-
6
- # <img src="logo.png" width="40" > ChatGPT 学術最適化
7
-
8
**If you like this project, please give it a star. And if you have come up with a handier academic shortcut key or function plugin, feel free to open an issue or a pull request. READMEs translated by this project itself are also available: [English](docs/README_EN.md) | [日本語](docs/README_JP.md) | [Русский](docs/README_RS.md) | [Français](docs/README_FR.md).**

> **Notes**
>
> 1. Only function plugins (buttons) marked in **red** can read files. Some plugins live in the drop-down menu of the plugin area. New plugin PRs are welcome!
>
> 2. What each file in this project does is described in detail in `self_analysis.md` (the self-analysis report). As versions evolve, you can click the relevant function plugin at any time to call GPT and regenerate the self-analysis report. Frequently asked questions are collected in the `wiki` (https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).


<div align="center">

Feature | Description
--- | ---
One-click polishing | Fixes grammar errors in papers accurately, in one batch.
One-click Japanese-English translation | Japanese-English translation is one click away.
One-click code explanation | Displays and explains code correctly.
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Custom shortcut keys are supported.
[Proxy server configuration](https://www.bilibili.com/video/BV1rc411W7Dr) | Proxy servers are supported.
Modular design | Supports custom high-order function plugins and [function plugins], with [hot reload] of plugins. Details [here](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's own source code.
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of another Python/C/C++/Java/Lua/... project tree.
Paper reading | [Function plugin] Reads an entire LaTeX paper in one click and generates an abstract.
LaTeX full-text translation and polishing | [Function plugin] Translates or polishes a LaTeX paper in one click.
Batch comment generation | [Function plugin] Generates function comments in bulk with one click.
Chat analysis report generation | [Function plugin] Automatically generates a summary report after a run.
[arXiv helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Given an arXiv article URL, translates the abstract and downloads the PDF in one click.
[Full-text PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded).
[Google Scholar integration helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, lets GPT pick out the interesting articles for you.
Formula/image/table display | Shows formulas in both their TeX source and rendered forms; supports formula and code highlighting.
Multi-threaded function plugin support | Calls ChatGPT from multiple threads, making light work of large volumes of text or code.
Dark Gradio [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Append `/?__dark-theme=true` to the browser URL to switch to the dark theme.
[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, with an [API2D](https://api2d.com/) interface | Being served by GPT-3.5, GPT-4, and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time feels great!
HuggingFace [online version](https://huggingface.co/spaces/qingxu98/gpt-academic) (no proxy needed) | Log in to HuggingFace and duplicate [this space](https://huggingface.co/spaces/qingxu98/gpt-academic).
...... | ......

</div>


- New interface (switch between the "left-right layout" and the "top-bottom layout" simply by changing the LAYOUT option in config.py)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>


- All buttons are generated dynamically by reading functional.py, so custom features can be added freely, freeing up your clipboard
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Proofreading/correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- When the output contains formulas, they are shown in both TeX and rendered form for easy copying and reading
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Too lazy to read the project code? Just feed the whole project to ChatGPT
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixed calls to multiple large language models (ChatGLM + OpenAI GPT-3.5 + [API2D](https://api2d.com/) GPT-4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

Mixed calls to multiple large language models, [HuggingFace beta](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the HuggingFace version does not support ChatGLM)


---

## Installation - Method 1: Run directly (Windows, Linux, or macOS)

1. Download the project.
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
```

2. Configure the API key and proxy settings

In `config.py`, configure your overseas proxy and OpenAI API key, as explained below.
```
1. If you are in China, you need to set up an overseas proxy to use the OpenAI API smoothly. Read config.py carefully for the details (1. change USE_PROXY in it to True; 2. modify the proxy settings as instructed).
2. Configure your OpenAI API key. Obtain an API key on the OpenAI website; once you have it, just set it in the config.py file.
3. Problems related to proxy networks (network timeouts, non-working proxies) are collected at https://github.com/binary-husky/chatgpt_academic/issues/1.
```
(P.S. When the program runs, it first checks whether a private configuration file named config_private.py exists next to config.py; if it does, the same-named settings in it override those in config.py. So if you understand this configuration-reading logic, we recommend creating a new file named config_private.py next to config.py and moving your settings there from config.py. Since config_private.py is not tracked by git, it keeps your private information safer.)
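The override order described above can be sketched as follows. This is only a minimal illustration of the described behavior, not the project's actual loader, and `read_single_conf` is a name invented here:

```python
# Minimal sketch (illustrative only): values in config_private.py, if that
# file exists and defines the name, shadow the same-named values in config.py.
import importlib

def read_single_conf(name: str):
    try:
        return getattr(importlib.import_module("config_private"), name)
    except (ImportError, AttributeError):
        return getattr(importlib.import_module("config"), name)

API_KEY = read_single_conf("API_KEY")
USE_PROXY = read_single_conf("USE_PROXY")
```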
3. Install the dependencies.
```sh
# (Option 1)
python -m pip install -r requirements.txt


# (Option 2) If you use Anaconda, the steps are similar:
# (Option 2.1) conda create -n gptac_venv python=3.11
# (Option 2.2) conda activate gptac_venv
# (Option 2.3) python -m pip install -r requirements.txt

# Note: use the official pip source or the Alibaba pip source. Other pip sources (e.g. some university mirrors) can cause problems. To switch sources temporarily:
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
```

If you need support for Tsinghua ChatGLM, you have to install additional dependencies (not recommended if you are unfamiliar with Python or your machine is not powerful enough):
```sh
python -m pip install -r request_llm/requirements_chatglm.txt
```

4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Test Python project analysis
    Enter `./crazy_functions/test_project/python/dqn` in the input field and click "Analyze the entire Python project".
- Test self code analysis
    Click "[Multi-threaded demo] Analyze this project itself (translate and explain the source)".
- Test the experimental function template (which asks GPT what happened in history on this day). You can use this function as a template for implementing more complex features.
    Click "[Function plugin template demo] On this day in history".
- There are more options in the drop-down menu of the function plugin area.
```

## Installation - Method 2: Use Docker (Linux)

1. ChatGPT only (recommended for most people)
``` sh
# Download the project
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
# Configure the overseas proxy and the OpenAI API key
Edit config.py with any text editor
# Install
docker build -t gpt-academic .
# Run
docker run --rm -it --net=host gpt-academic

# Test the function plugins
## Test the function plugin template (which asks GPT what happened in history on this day). You can use this function as a template for implementing more complex features.
Click "[Function plugin template demo] On this day in history".
## Test writing an abstract for a LaTeX project
Enter ./crazy_functions/test_project/latex/attention in the input field and click "Read a TeX paper and write an abstract".
## Test Python project analysis
Enter ./crazy_functions/test_project/python/dqn in the input field and click "Analyze the entire Python project".

There are more options in the drop-down menu of the function plugin area.
```

2. ChatGPT + ChatGLM (requires deep familiarity with Docker and a sufficiently powerful machine)

```sh
# Edit the Dockerfile
cd docs && nano Dockerfile+ChatGLM
# How to build
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# How to run (1) directly:
docker run --rm -it --net=host --gpus=all gpt-academic
# How to run (2) enter the container and adjust things there:
docker run --rm -it --net=host --gpus=all gpt-academic bash
```

## Installation - Method 3: Other deployment options

1. Cloud server deployment
[Deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

2. Using WSL2 (Windows Subsystem for Linux)
[Deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)


## Installation - Proxy configuration
1. The usual way
[Configure a proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)

2. Beginner-friendly tutorial
[Beginner-friendly tutorial](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)


---

## Adding custom buttons (academic shortcut keys)

Open `core_functional.py` with any text editor, add an entry like the one below, and restart the program. (If the button already exists, both the prefix and the suffix support hot editing and take effect immediately, without restarting the program.)

Example:
```
"超级英译中": {
    # Prefix - describes your request, e.g. translation, code explanation, polishing, etc.
    "Prefix": "Translate the following content into Chinese, then explain the technical terms in a markdown table:\n\n",

    # Suffix - used together with the prefix, e.g. to wrap the input content in quotation marks.
    "Suffix": "",
},
```
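At runtime the button simply glues these two fields around whatever is currently in the input box. Conceptually (a simplified sketch, not the project's exact request code):

```python
# Simplified sketch of how a core_functional.py entry is applied to the
# current input box content before it is sent to the model.
entry = {
    "Prefix": "Translate the following content into Chinese, then explain the technical terms in a markdown table:\n\n",
    "Suffix": "",
}

def build_prompt(user_input: str) -> str:
    return entry["Prefix"] + user_input + entry["Suffix"]
```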
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>


---

## Examples of some features

### Image display:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
</div>


### The program analyzing itself:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
</div>

### Analyzing other Python/C++ projects:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
</div>

### Batch reading of LaTeX papers and abstract generation

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
</div>

### Automatic report generation

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

### Modular feature design

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>


### Translating source code into English

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
</div>

## Todo and version plan:
- version 3.2+ (todo): function plugins support richer parameter interfaces.
- version 3.1: query multiple GPT models at once; API2D supported; load balancing across multiple API keys.
- version 3.0: support for ChatGLM and other small LLMs.
- version 2.6: restructured the plugin architecture, improved interactivity, and added more plugins.
- version 2.5: self-updating; fixed the problem of overly long text and token overflow when summarizing the source code of large projects.
- version 2.4: (1) added full-text PDF translation; (2) added the option to switch the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: improved multi-threaded interactivity.
- version 2.2: function plugins support hot reload.
- version 2.1: collapsible layout.
- version 2.0: introduced modular function plugins.
- version 1.0: basic functionality.

## References and learning

```
This project borrows from the design of many excellent projects, mainly:

# Reference project 1: ChuanhuChatGPT, from which many tricks were borrowed
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Reference project 2: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B
```
spaces/CorvaeOboro/gen_ability_icon/torch_utils/ops/bias_act.cpp DELETED
@@ -1,99 +0,0 @@
// Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto.  Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include "bias_act.h"

//------------------------------------------------------------------------

static bool has_same_layout(torch::Tensor x, torch::Tensor y)
{
    if (x.dim() != y.dim())
        return false;
    for (int64_t i = 0; i < x.dim(); i++)
    {
        if (x.size(i) != y.size(i))
            return false;
        if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
            return false;
    }
    return true;
}

//------------------------------------------------------------------------

static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
{
    // Validate arguments.
    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
    TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
    TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
    TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
    TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same shape, dtype, and device as x");
    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
    TORCH_CHECK(b.dim() == 1, "b must have rank 1");
    TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
    TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
    TORCH_CHECK(grad >= 0, "grad must be non-negative");

    // Validate layout.
    TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
    TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
    TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
    TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
    TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");

    // Create output tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
    torch::Tensor y = torch::empty_like(x);
    TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");

    // Initialize CUDA kernel parameters.
    bias_act_kernel_params p;
    p.x     = x.data_ptr();
    p.b     = (b.numel()) ? b.data_ptr() : NULL;
    p.xref  = (xref.numel()) ? xref.data_ptr() : NULL;
    p.yref  = (yref.numel()) ? yref.data_ptr() : NULL;
    p.dy    = (dy.numel()) ? dy.data_ptr() : NULL;
    p.y     = y.data_ptr();
    p.grad  = grad;
    p.act   = act;
    p.alpha = alpha;
    p.gain  = gain;
    p.clamp = clamp;
    p.sizeX = (int)x.numel();
    p.sizeB = (int)b.numel();
    p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;

    // Choose CUDA kernel.
    void* kernel;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "bias_act_cuda", [&]
    {
        kernel = choose_bias_act_kernel<scalar_t>(p);
    });
    TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");

    // Launch CUDA kernel.
    p.loopX = 4;
    int blockSize = 4 * 32;
    int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
    void* args[] = {&p};
    AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
    return y;
}

//------------------------------------------------------------------------

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("bias_act", &bias_act);
}

//------------------------------------------------------------------------
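For context, an extension source like this one is compiled and then called from Python. Below is a minimal sketch under stated assumptions: it JIT-compiles the op with `torch.utils.cpp_extension.load` (the repo actually ships its own plugin loader in `torch_utils/ops/bias_act.py`), it assumes the companion `bias_act.cu` and `bias_act.h` files sit next to this file, and the `act`/`alpha`/`gain`/`clamp` values are placeholders rather than values taken from the kernel's activation table.

```python
# A hypothetical invocation sketch, not the repo's own loader.
import torch
from torch.utils.cpp_extension import load

# Assumption: bias_act.cpp and bias_act.cu are in the working directory and a
# CUDA toolchain is available.
plugin = load(name="bias_act_plugin", sources=["bias_act.cpp", "bias_act.cu"])

x = torch.randn(4, 512, device="cuda")
b = torch.randn(512, device="cuda")
empty = torch.empty(0, device="cuda")  # placeholder for xref / yref / dy

# Argument order follows the C++ signature above:
# (x, b, xref, yref, dy, grad, dim, act, alpha, gain, clamp)
y = plugin.bias_act(x, b, empty, empty, empty, 0, 1, 1, 0.0, 1.0, -1.0)
print(y.shape)  # same shape and layout as x
```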
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/vit.py DELETED
@@ -1,491 +0,0 @@
import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F


class Slice(nn.Module):
    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        return x[:, self.start_index :]


class AddReadout(nn.Module):
    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return x[:, self.start_index :] + readout.unsqueeze(1)


class ProjectReadout(nn.Module):
    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
        features = torch.cat((x[:, self.start_index :], readout), -1)

        return self.project(features)


class Transpose(nn.Module):
    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        x = x.transpose(self.dim0, self.dim1)
        return x


def forward_vit(pretrained, x):
    b, c, h, w = x.shape

    glob = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4


def _resize_pos_embed(self, posemb, gs_h, gs_w):
    posemb_tok, posemb_grid = (
        posemb[:, : self.start_index],
        posemb[0, self.start_index :],
    )

    gs_old = int(math.sqrt(len(posemb_grid)))

    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)

    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)

    return posemb


def forward_flex(self, x):
    b, c, h, w = x.shape

    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    x = x + pos_embed
    x = self.pos_drop(x)

    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x


activations = {}


def get_activation(name):
    def hook(model, input, output):
        activations[name] = output

    return hook


def get_readout_oper(vit_features, features, use_readout, start_index=1):
    if use_readout == "ignore":
        readout_oper = [Slice(start_index)] * len(features)
    elif use_readout == "add":
        readout_oper = [AddReadout(start_index)] * len(features)
    elif use_readout == "project":
        readout_oper = [
            ProjectReadout(vit_features, start_index) for out_feat in features
        ]
    else:
        raise ValueError(
            "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
        )

    return readout_oper


def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    # 32, 48, 136, 384
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject these functions into the VisionTransformer instances so that
    # we can use them with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained


def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )


def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )


def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )


def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model(
        "vit_deit_base_distilled_patch16_384", pretrained=pretrained
    )

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
    )


def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
):
    pretrained = nn.Module()

    pretrained.model = model

    if use_vit_only:
        pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
        pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    else:
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation("1")
        )
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation("2")
        )

    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    if use_vit_only:
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )

        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        pretrained.act_postprocess1 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
        pretrained.act_postprocess2 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )

    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject these functions into the VisionTransformer instances so that
    # we can use them with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained


def _make_pretrained_vitb_rn50_384(
    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
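A hypothetical smoke test for the backbone constructors above: it builds the ViT-Base/16 variant and runs one forward pass through the four hooked stages (timm downloads the weights; exact behavior can vary with the installed timm version).

```python
# Sketch only: exercises _make_pretrained_vitb16_384 and forward_vit as
# defined in this file.
import torch

backbone = _make_pretrained_vitb16_384(pretrained=True, use_readout="project")
x = torch.randn(1, 3, 384, 384)
with torch.no_grad():
    layer_1, layer_2, layer_3, layer_4 = forward_vit(backbone, x)

# With features=[96, 192, 384, 768] and a 384x384 input, the act_postprocess
# heads yield roughly (1, 96, 96, 96), (1, 192, 48, 48), (1, 384, 24, 24)
# and (1, 768, 12, 12).
print([tuple(t.shape) for t in (layer_1, layer_2, layer_3, layer_4)])
```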
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_fixes.py DELETED
@@ -1,77 +0,0 @@
# JSONDecodeError was introduced in requests=2.27 released in 2022.
# This allows us to support older requests for users.
# More information: https://github.com/psf/requests/pull/5856
try:
    from requests import JSONDecodeError  # type: ignore # noqa: F401
except ImportError:
    try:
        from simplejson import JSONDecodeError  # type: ignore # noqa: F401
    except ImportError:
        from json import JSONDecodeError  # type: ignore # noqa: F401

import contextlib
import os
import shutil
import stat
import tempfile
from functools import partial
from pathlib import Path
from typing import Callable, Generator, Optional, Union

import yaml


# Wrap `yaml.dump` to set `allow_unicode=True` by default.
#
# Example:
# ```py
# >>> yaml.dump({"emoji": "👀", "some unicode": "日本か"})
# 'emoji: "\\U0001F440"\nsome unicode: "\\u65E5\\u672C\\u304B"\n'
#
# >>> yaml_dump({"emoji": "👀", "some unicode": "日本か"})
# 'emoji: "👀"\nsome unicode: "日本か"\n'
# ```
yaml_dump: Callable[..., str] = partial(yaml.dump, stream=None, allow_unicode=True)  # type: ignore


@contextlib.contextmanager
def SoftTemporaryDirectory(
    suffix: Optional[str] = None,
    prefix: Optional[str] = None,
    dir: Optional[Union[Path, str]] = None,
    **kwargs,
) -> Generator[str, None, None]:
    """
    Context manager to create a temporary directory and safely delete it.

    If the tmp directory cannot be deleted normally, we set the WRITE permission and retry.
    If cleanup still fails, we give up but don't raise an exception. This is equivalent
    to `tempfile.TemporaryDirectory(..., ignore_cleanup_errors=True)` introduced in
    Python 3.10.

    See https://www.scivision.dev/python-tempfile-permission-error-windows/.
    """
    tmpdir = tempfile.TemporaryDirectory(prefix=prefix, suffix=suffix, dir=dir, **kwargs)
    try:
        yield tmpdir.name
    finally:
        # Cleanup runs even if the caller's block raised.
        try:
            # First attempt: normal cleanup.
            shutil.rmtree(tmpdir.name)
        except Exception:
            # If that failed, set the write permission and retry.
            try:
                shutil.rmtree(tmpdir.name, onerror=_set_write_permission_and_retry)
            except Exception:
                pass

        # And finally, clean up the tmpdir object itself.
        # If it fails again, give up, but do not raise.
        try:
            tmpdir.cleanup()
        except Exception:
            pass


def _set_write_permission_and_retry(func, path, excinfo):
    # Used as shutil.rmtree's `onerror` callback: make the path writable
    # (needed on Windows for read-only files) and retry the failed operation.
    os.chmod(path, stat.S_IWRITE)
    func(path)
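A short usage sketch of the two helpers above. This is hypothetical caller code; `_fixes` is a private module, and the helpers are assumed here to be re-exported through `huggingface_hub.utils`.

```python
from pathlib import Path

from huggingface_hub.utils import SoftTemporaryDirectory, yaml_dump

# yaml_dump keeps unicode readable instead of escaping it, and the directory
# is removed best-effort on exit (important on Windows, where lingering file
# handles can make rmtree fail).
with SoftTemporaryDirectory() as tmpdir:
    card = Path(tmpdir) / "metadata.yaml"
    card.write_text(yaml_dump({"emoji": "👀", "note": "日本語"}), encoding="utf-8")
    print(card.read_text(encoding="utf-8"))
```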
spaces/DeclK/pose/main.py DELETED
@@ -1,96 +0,0 @@
# Run inference on 2 videos, then use DTW to match the pose keypoints.
from tools.inferencer import PoseInferencerV2
from tools.dtw import DTWForKeypoints
from tools.visualizer import FastVisualizer
from argparse import ArgumentParser
from tools.utils import convert_video_to_playable_mp4
from tqdm import tqdm
import mmengine
import numpy as np
import mmcv
import cv2

def parse_args():
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, default='configs/mark2.py')
    parser.add_argument('--video1', type=str, default='assets/tennis1.mp4')
    parser.add_argument('--video2', type=str, default='assets/tennis2.mp4')
    return parser.parse_args()

def concat(img1, img2, height=1080):
    h1, w1, _ = img1.shape
    h2, w2, _ = img2.shape

    # Calculate the scaling factor for each image
    scale1 = height / img1.shape[0]
    scale2 = height / img2.shape[0]

    # Resize the images to the same height
    img1 = cv2.resize(img1, (int(w1 * scale1), int(h1 * scale1)))
    img2 = cv2.resize(img2, (int(w2 * scale2), int(h2 * scale2)))

    # Concatenate the images horizontally
    image = cv2.hconcat([img1, img2])
    return image

def draw(vis: FastVisualizer, img, keypoint, box, oks, oks_unnorm, draw_score_bar=True):
    vis.set_image(img)
    vis.draw_non_transparent_area(box)
    if draw_score_bar:
        vis.draw_score_bar(oks)
    vis.draw_human_keypoints(keypoint, oks_unnorm)
    return vis.get_image()

def main(cfg):
    # build PoseInferencerV2
    pose_inferencer = PoseInferencerV2(
        cfg.det_cfg,
        cfg.pose_cfg,
        device='cpu')

    v1 = mmcv.VideoReader(cfg.video1)
    v2 = mmcv.VideoReader(cfg.video2)
    video_writer = None

    all_det1, all_pose1 = pose_inferencer.inference_video(cfg.video1)
    all_det2, all_pose2 = pose_inferencer.inference_video(cfg.video2)

    keypoints1 = np.stack([p.keypoints[0] for p in all_pose1])  # keep only the first prediction
    keypoints2 = np.stack([p.keypoints[0] for p in all_pose2])
    boxes1 = np.stack([d.bboxes[0] for d in all_det1])
    boxes2 = np.stack([d.bboxes[0] for d in all_det2])

    dtw_path, oks, oks_unnorm = DTWForKeypoints(keypoints1, keypoints2).get_dtw_path()

    vis = FastVisualizer()

    for i, j in tqdm(dtw_path):
        frame1 = v1[i]
        frame2 = v2[j]

        frame1_ = draw(vis, frame1.copy(), keypoints1[i], boxes1[i],
                       oks[i, j], oks_unnorm[i, j])
        frame2_ = draw(vis, frame2.copy(), keypoints2[j], boxes2[j],
                       oks[i, j], oks_unnorm[i, j], draw_score_bar=False)
        # concatenate the two frames side by side
        frame = concat(frame1_, frame2_)
        # draw the logo
        vis.set_image(frame)
        frame = vis.draw_logo().get_image()
        # write the video
        w, h = frame.shape[1], frame.shape[0]
        if video_writer is None:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            video_writer = cv2.VideoWriter('dtw_compare.mp4',
                                           fourcc, v1.fps, (w, h))
        video_writer.write(frame)
    video_writer.release()
    convert_video_to_playable_mp4('dtw_compare.mp4')

if __name__ == '__main__':
    args = parse_args()
    cfg = mmengine.Config.fromfile(args.config)
    cfg.video1 = args.video1
    cfg.video2 = args.video2

    main(cfg)
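The frame alignment above is delegated to `DTWForKeypoints`, whose internals are not shown here. As a toy illustration of the algorithm family (not the project's `tools/dtw.py`), classic dynamic time warping over a precomputed frame-to-frame OKS similarity matrix looks like this, minimizing `1 - oks` as the step cost:

```python
# Toy DTW sketch; `oks[i, j]` is the pose similarity between frame i of
# video 1 and frame j of video 2 (higher = more similar).
import numpy as np

def dtw_path(oks: np.ndarray):
    n, m = oks.shape
    cost = 1.0 - oks
    acc = np.full((n + 1, m + 1), np.inf)
    acc[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            acc[i, j] = cost[i - 1, j - 1] + min(
                acc[i - 1, j],      # hold video 2 on frame j
                acc[i, j - 1],      # hold video 1 on frame i
                acc[i - 1, j - 1],  # advance both videos
            )
    # Backtrack from (n, m) to recover the aligned (i, j) frame pairs.
    i, j, path = n, m, []
    while i > 0 and j > 0:
        path.append((i - 1, j - 1))
        step = np.argmin([acc[i - 1, j - 1], acc[i - 1, j], acc[i, j - 1]])
        if step == 0:
            i, j = i - 1, j - 1
        elif step == 1:
            i -= 1
        else:
            j -= 1
    return path[::-1]
```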
spaces/Demosthene-OR/avr23-cds-translation/style.css DELETED
@@ -1,113 +0,0 @@
/*
h1,
h2,
h3,
h4 {
    color: #000000;
}
*/

/* The following block is needed because the streamlit_option_menu module "breaks" the CSS rules below */
@media (prefers-color-scheme: dark) {
    .st-cc {
        color: #fff !important; /* Text color in dark mode */
    }
    .st-cg:hover {
        color: rgb(255, 75, 75) !important; /* Text color in dark mode */
    }
}

p {
    margin-bottom: 0.1rem;
}

code {
    color: #1ec3bc;
}

#MainMenu {
    display: none;
}

div[data-testid="stDecoration"] {
    display: none;
}

footer {
    display: none;
}

/* Radio buttons */

.st-cc {
    color: black;
    font-weight: 500;
}

/* Sidebar */

.css-1544g2n {
    padding-top: 1rem;
}

.css-10oheav {
    padding-top: 3rem;
}

.css-ue6h4q {
    min-height: 0.5rem;
}

section[data-testid="stSidebar"] > div {
    background-color: #10b8dd;
    padding-top: 1rem;
    padding-left: 1.5rem;
}

section[data-testid="stSidebar"] button[title="View fullscreen"] {
    display: none;
}

section[data-testid="stSidebar"] button[kind="icon"] {
    display: none;
}

section[data-testid="stSidebar"] .st-bk {
    background-color: #10b8dd;
}

section[data-testid="stSidebar"] .st-c0 {
    /* background-color: #10b8dd; */
}

section[data-testid="stSidebar"] hr {
    margin-top: 30px;
    border-color: white;
    width: 50px;
}

section[data-testid="stSidebar"] h2 {
    color: white;
}

/* Images */

button[title="View fullscreen"] {
    display: none;
}

/* hr */

hr {
    width: 200px;
    border-width: 5px;
    border-color: #10b8dd;
    margin-top: 0px;
}

/* First Page */

section[tabindex="0"] .block-container {
    padding-top: 0px;
    padding-bottom: 0px;
}
spaces/Detomo/ai-avatar-frontend/src/converter.js DELETED
@@ -1,97 +0,0 @@
import {
  AnimationClip,
  NumberKeyframeTrack
} from 'three';

const fps = 60;

function modifiedKey(key) {
  // The eye-look blendshapes keep their original names; other Left/Right
  // names are mapped to the _L/_R suffixes used by the model's morph targets.
  if (["eyeLookDownLeft", "eyeLookDownRight", "eyeLookInLeft", "eyeLookInRight", "eyeLookOutLeft", "eyeLookOutRight", "eyeLookUpLeft", "eyeLookUpRight"].includes(key)) {
    return key;
  }

  if (key.endsWith("Right")) {
    return key.replace("Right", "_R");
  }
  if (key.endsWith("Left")) {
    return key.replace("Left", "_L");
  }
  return key;
}

function createAnimation(recordedData, morphTargetDictionary, bodyPart) {
  // console.log("----morphTargetDictionary", morphTargetDictionary)

  if (recordedData.length !== 0) {
    // One value array per morph target, filled frame by frame.
    let animation = [];
    for (let i = 0; i < Object.keys(morphTargetDictionary).length; i++) {
      animation.push([]);
    }
    let time = [];
    let finishedFrames = 0;
    recordedData.forEach((d) => {
      Object.entries(d.blendshapes).forEach(([key, value]) => {
        if (!(modifiedKey(key) in morphTargetDictionary)) { return; }

        if (key === 'mouthShrugUpper') {
          value += 0.4;
        }

        animation[morphTargetDictionary[modifiedKey(key)]].push(value);
      });
      time.push(finishedFrames / fps);
      finishedFrames++;
    });

    // console.log("-----animation", animation);

    let tracks = [];

    let flag = false;
    // Create one morph-target track per blendshape.
    Object.entries(recordedData[0].blendshapes).forEach(([key, value]) => {
      if (!(modifiedKey(key) in morphTargetDictionary)) { return; }

      let i = morphTargetDictionary[modifiedKey(key)];

      // if (bodyPart === "HG_TeethLower") {
      //   if (flag === true)
      //     return;
      //   if (key === 'jawOpen') {
      //     let track2 = new NumberKeyframeTrack(`HG_TeethLower.morphTargetInfluences[${i}]`, time, animation[i]);
      //     tracks.push(track2);
      //     flag = true;
      //   }
      // } else {
      let track = new NumberKeyframeTrack(`${bodyPart}.morphTargetInfluences[${i}]`, time, animation[i]);
      tracks.push(track);
      // }

      // if (key === "jawOpen") {
      //   let track2 = new NumberKeyframeTrack(`HG_TeethLower.morphTargetInfluences[${i}]`, time, animation[i]);
      //   tracks.push(track2);
      //   console.log("----jawOpen Track", track2);
      // }
    });

    const clip = new AnimationClip('animation', -1, tracks);
    return clip;
  }
  return null;
}

export default createAnimation;
spaces/Dogge/bigscience-bloomz-7b1/app.py DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/bigscience/bloomz-7b1").launch()