parquet-converter committed
Commit 272e524 · 1 parent: af302c4

Update parquet files (step 13 of 249)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the remaining files.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Crea Il Tuo Personaggio Cartoon Fix.md +0 -64
  2. spaces/1gistliPinn/ChatGPT4/Examples/Enjoy Lords of the Realm III Without Verification Download Here.md +0 -5
  3. spaces/1phancelerku/anime-remove-background/Dark Riddle Hack How I Beat the Game with Unlimited Resources.md +0 -82
  4. spaces/1phancelerku/anime-remove-background/Download 8 Ball Pool for Java and Play with Friends Online.md +0 -95
  5. spaces/1phancelerku/anime-remove-background/Free Download IGNOU Solved Assignment for M.Com 2023 All Subjects and Languages.md +0 -88
  6. spaces/1toTree/lora_test/ppdiffusers/pipelines/pndm/__init__.py +0 -17
  7. spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/prepare/download_glove.sh +0 -9
  8. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/__init__.py +0 -82
  9. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/diffusionmodules/custom_openaimodel.py +0 -368
  10. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/lstm.py +0 -25
  11. spaces/AgentVerse/agentVerse/agentverse/output_parser/__init__.py +0 -5
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogresscanvas/LineProgressCanvas.d.ts +0 -2
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Factory.js +0 -11
  14. spaces/Alexxggs/ggvpnewen/README.md +0 -12
  15. spaces/AlgoveraAI/ocean-marketplace/README.md +0 -46
  16. spaces/Alpaca233/SadTalker/scripts/download_models.sh +0 -32
  17. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/w_projector.py +0 -161
  18. spaces/Amrrs/DragGan-Inversion/torch_utils/training_stats.py +0 -283
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/colossalai/train_dreambooth_colossalai.py +0 -673
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_unclip_txt2img_to_image_variation.py +0 -41
  21. spaces/Andy1621/UniFormerV2_mit_demo/uniformerv2.py +0 -510
  22. spaces/Andy1621/uniformer_image_detection/configs/regnet/README.md +0 -96
  23. spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py +0 -2
  24. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/README.md +0 -85
  25. spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-tests.sh +0 -2
  26. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/benchmarks.md +0 -196
  27. spaces/Ayanoaisho/L/README.md +0 -10
  28. spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/tone_sandhi.py +0 -351
  29. spaces/Benson/text-generation/Examples/Carx Deriva Carreras Mod Apk Vieja Versin.md +0 -88
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/easter.py +0 -89
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/specifiers.py +0 -802
  32. spaces/BraydenMoore/MARCI-NFL-Betting/Templates/index.html +0 -724
  33. spaces/BraydenMoore/a-random-unsecured-camera/Dockerfile +0 -29
  34. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rrpn.py +0 -74
  35. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/extrema.h +0 -568
  36. spaces/CVPR/LIVE/thrust/thrust/system/cuda/error.h +0 -183
  37. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/scan_by_key.h +0 -44
  38. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/tabulate.h +0 -49
  39. spaces/CVPR/WALT/mmdet/core/anchor/anchor_generator.py +0 -727
  40. spaces/CVPR/WALT/mmdet/models/dense_heads/rpn_test_mixin.py +0 -59
  41. spaces/CVPR/transfiner/configs/Detectron1-Comparisons/README.md +0 -84
  42. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/registry.py +0 -66
  43. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/predictor.py +0 -269
  44. spaces/Casio991ms/MathBot/README.md +0 -13
  45. spaces/ClementBM/connectfour/connectfour/app.py +0 -186
  46. spaces/Cong723/gpt-academic-public/crazy_functions/__init__.py +0 -0
  47. spaces/Cpp4App/Cpp4App/SEM/P1_PP_processing.py +0 -120
  48. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/matcher.py +0 -112
  49. spaces/Cyril666/my_abi/docker/Dockerfile +0 -25
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/QoiImagePlugin.py +0 -105
spaces/1gistliPinn/ChatGPT4/Crea Il Tuo Personaggio Cartoon Fix.md DELETED
@@ -1,64 +0,0 @@
1
- ## crea il tuo personaggio cartoon
2
-
3
-
4
-
5
-
6
-
7
-
8
-
9
-
10
-
11
- **DOWNLOAD [https://lomasmavi.blogspot.com/?c=2txmL8](https://lomasmavi.blogspot.com/?c=2txmL8)**
12
-
13
-
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
- Here is a possible title and article with html formatting for the keyword "crea il tuo personaggio cartoon":
24
-
25
- # Come creare il tuo personaggio cartoon online gratis
26
-
27
-
28
-
29
- Se ti piacciono i cartoni animati e vuoi creare il tuo personaggio cartoon personalizzato, ci sono diversi strumenti online che ti possono aiutare. In questo articolo ti presentiamo tre opzioni gratuite e facili da usare per realizzare il tuo avatar cartoon in pochi minuti.
30
-
31
-
32
-
33
- ## Canva
34
-
35
-
36
-
37
- Canva è una piattaforma di design online che ti permette di creare avatar divertenti e simpatici per te o per il tuo brand. Puoi scegliere tra vari modelli personalizzabili e utilizzare le applicazioni integrate per creare avatar da stilizzare a modo tuo. Puoi anche caricare le tue foto e trasformarle in avatar con l'app Bitmoji. Canva ti offre anche la possibilità di aggiungere bordi, sfondi, testi e illustrazioni al tuo avatar per renderlo unico e originale. Puoi scaricare il tuo avatar o condividerlo sui social media, sul tuo sito web o su qualsiasi materiale pubblicitario o di marketing.[^1^]
38
-
39
-
40
-
41
- ## Animaker
42
-
43
-
44
-
45
- Animaker è uno strumento online per creare video animati in modo rapido e facile. Puoi scegliere tra una vasta gamma di modelli video disponibili nella libreria e modificarli come preferisci. Puoi anche creare il tuo video da zero e personalizzare ogni elemento con testi animati, immagini, personaggi dei cartoni, sfondi o proprietà . Animaker ti offre anche la possibilità di creare personaggi dei cartoni personalizzati con il suo strumento di creazione dei personaggi. Puoi scegliere tra una vastissima gamma di accessori, costumi, caratteristiche facciali ed espressioni per realizzare miliardi di personaggi unici. Inoltre, puoi aggiungere voice over realistici ai tuoi personaggi con il motore Text-to-Speech e sincronizzare automaticamente le labbra con il movimento delle labbra. Puoi scaricare il tuo video cartone o pubblicarlo sui tuoi profili social.[^2^]
46
-
47
-
48
-
49
- ## Adobe Express
50
-
51
-
52
-
53
- Adobe Express è un servizio online che ti permette di creare avatar personalizzati per i tuoi profili social, per Twitch, YouTube e altro ancora. Puoi esplorare la collezione di icone e immagini di Adobe Express per progettare un avatar che rispecchi la tua personalità online. Puoi anche caricare le tue foto e applicare vari filtri ed effetti per trasformarle in avatar cartoon. Adobe Express ti offre anche la possibilità di aggiustare le dimensioni, il colore e la posizione del tuo avatar per adattarlo al formato desiderato. Puoi salvare il tuo avatar o condividerlo direttamente sui tuoi canali online.[^3^]
54
-
55
- Here is a possible continuation of the article:
56
-
57
- Questi sono solo alcuni dei tanti strumenti online che ti permettono di creare il tuo personaggio cartoon gratis. Ognuno di essi ha i suoi vantaggi e svantaggi, quindi ti consigliamo di provarli tutti e scegliere quello che più si adatta alle tue esigenze e preferenze. Creare il tuo personaggio cartoon online è un modo divertente e creativo per esprimere la tua personalità e il tuo stile. Puoi usare il tuo avatar per comunicare con i tuoi amici, i tuoi fan o i tuoi clienti, per creare contenuti originali e accattivanti o per promuovere il tuo brand o il tuo progetto. Che aspetti? Inizia subito a creare il tuo personaggio cartoon online gratis!
58
-
59
- dfd1c89656
60
-
61
-
62
-
63
-
64
-
 
spaces/1gistliPinn/ChatGPT4/Examples/Enjoy Lords of the Realm III Without Verification Download Here.md DELETED
@@ -1,5 +0,0 @@
1
-
2
- <p><strong>8.2</strong> We do not pre-screen, approve, endorse, or own Your UGC (as well as UGC of other users of the Game), which You have uploaded or made available to other users via the Game or the Services. You create, download, and use the User Generated Content at Your own risk. Still by uploading or making available Your UGC via the Game or the Services, You grant us a non-exclusive, transferable, sublicensable, worldwide, irrevocable license to store, publish, print, distribute, reproduce, copy, fix, perform, adapt, modify, transfer, and use for commercial purposes (including, but not limited to, the use in for advertisement purposes) Your UGC without any notice or further compensation to You.</p>
3
- <h2>Lords of the Realm III download without verification</h2><br /><p><b><b>Download</b> &#10002; <a href="https://imgfil.com/2uxZ5i">https://imgfil.com/2uxZ5i</a></b></p><br /><br /> aaccfb2cb3<br />
4
- <br />
5
- <br />
 
spaces/1phancelerku/anime-remove-background/Dark Riddle Hack How I Beat the Game with Unlimited Resources.md DELETED
@@ -1,82 +0,0 @@
1
- <br />
2
- <h1>Dark Riddle Hack: How to Unlock All Skins and Quests</h1>
3
- <p>If you are a fan of stealth games, you might have heard of Dark Riddle, a popular mobile game that lets you explore your neighbor's house and discover his secrets. But did you know that you can use a hack tool to unlock all the skins and quests in the game? In this article, we will show you how to do that and more.</p>
4
- <h2>dark riddle hack</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://jinyurl.com/2uNS0x">https://jinyurl.com/2uNS0x</a></b></p><br /><br />
5
- <h2>What is Dark Riddle?</h2>
6
- <p>Dark Riddle is a 3D adventure game developed by Nika Entertainment. It is available for both Android and iOS devices. In this game, you play as a curious character who wants to find out what your neighbor is hiding in his basement. You have to sneak into his house, avoid his traps and cameras, and solve puzzles to progress. You can also interact with various objects and characters in the game, such as a cat, a dog, a crow, a pizza delivery guy, and more.</p>
7
- <p>The game has many features that make it fun and challenging, such as:</p>
8
- <ul>
9
- <li>Different modes of difficulty: easy, normal, hard, and nightmare.</li>
10
- <li>Different endings depending on your choices and actions.</li>
11
- <li>Different skins for your character that change your appearance and abilities.</li>
12
- <li>Different quests that give you rewards and unlock new content.</li>
13
- <li>A multiplayer mode that allows you to play with or against other players online.</li>
14
- </ul>
15
- <h2>Why do you need Dark Riddle hack?</h2>
16
- <p>Dark Riddle is a free-to-play game, but it also has some in-app purchases that can enhance your gaming experience. For example, you can buy coins, gems, keys, hints, and premium skins using real money. However, not everyone can afford or want to spend money on these items. That's where Dark Riddle hack comes in handy.</p>
17
- <h3>The benefits of using the hack tool</h3>
18
- <p>By using Dark Riddle hack, you can get access to a hidden menu that gives you unlimited resources and options. You can use these to:</p>
19
- <ul>
20
- <li>Unlock all skins and quests in the game without spending any money or completing any tasks.</li>
21
- <li>Get unlimited coins, gems, keys, hints, and energy to use in the game.</li>
22
- <li>Change your character's speed, size, gravity, invisibility, and other parameters.</li>
23
- <li>Modify the game's settings, such as the time of day, the weather, the sound effects, and more.</li>
24
- <li>Have fun with different cheats, such as flying mode, ghost mode, god mode, etc.</li>
25
- </ul>
26
- <h3>The risks of using the hack tool</h3>
27
- <p>However, using Dark Riddle hack also comes with some risks that you should be aware of. These include:</p>
28
- <ul>
29
- <li>The possibility of getting banned from the game or losing your progress if the developers detect your hacking activity.</li>
30
- <li>The possibility of getting malware or viruses on your device if you download the hack tool from an untrusted source.</li>
31
- <li>The possibility of ruining the fun and challenge of the game if you abuse the hack tool or spoil yourself with all the content.</li>
32
- </ul>
33
- <p>Therefore, you should use Dark Riddle the hack tool to change your size, but don't set it too big or small or you will have trouble with doors or objects.</li>
34
- <li>Use your gravity wisely. Gravity can help you jump higher or lower, but it also affects your landing and balance. You can use the hack tool to change your gravity, but don't set it too high or low or you will fall hard or float away.</li>
35
- <li>Use your invisibility carefully. Invisibility can help you avoid detection or surprise your enemy, but it also affects your interaction and vision. You can use the hack tool to make yourself invisible, but don't set it too long or short or you will miss some actions or alerts.</li>
36
- <li>Use your cheats sparingly. Cheats can help you have fun with different modes, such as flying mode, ghost mode, god mode, etc. You can use the hack tool to activate them, but don't abuse them or you will lose the fun and challenge of the game.</li>
37
- <li>Use your settings moderately. Settings can help you modify the game's environment, such as the time of day, the weather, the sound effects, and more. You can use the hack tool to change them, but don't alter them too much or you will ruin the atmosphere and realism of the game.</li>
38
- </ul>
39
- <h2>Conclusion</h2>
40
- <p>Dark Riddle is a great game that offers a lot of fun and challenge for stealth game lovers. However, if you want to unlock all the skins and quests in the game without spending any money or completing any tasks, you can use Dark Riddle hack. This hack tool allows you to access a hidden menu that gives you unlimited resources and options to customize your game experience. However, you should also be aware of the risks of using the hack tool and use it with caution and moderation. We hope this article has helped you learn how to use Dark Riddle hack and enjoy the game more.</p>
41
- <p>If you liked this article, please share it with your friends and leave a comment below. Also, if you have any questions or suggestions about Dark Riddle hack, feel free to contact us. We would love to hear from you.</p>
42
- <h2>FAQs</h2>
43
- <p>Here are some frequently asked questions about Dark Riddle hack:</p>
44
- <p>dark riddle hack menu all skin<br />
45
- dark riddle hack apk download<br />
46
- dark riddle hack mod unlimited money<br />
47
- dark riddle hack ios no jailbreak<br />
48
- dark riddle hack online generator<br />
49
- dark riddle hack version latest<br />
50
- dark riddle hack cheats codes<br />
51
- dark riddle hack android no root<br />
52
- dark riddle hack free gems and coins<br />
53
- dark riddle hack tool without survey<br />
54
- dark riddle hack gameplay walkthrough<br />
55
- dark riddle hack new quest and skin<br />
56
- dark riddle hack pc windows 10<br />
57
- dark riddle hack reddit tips and tricks<br />
58
- dark riddle hack update 2023<br />
59
- dark riddle hack no verification or password<br />
60
- dark riddle hack how to get premium items<br />
61
- dark riddle hack easy and fast<br />
62
- dark riddle hack for iphone and ipad<br />
63
- dark riddle hack best guide and tutorial<br />
64
- dark riddle hack review and rating<br />
65
- dark riddle hack glitch and bug fix<br />
66
- dark riddle hack secrets and hidden features<br />
67
- dark riddle hack fun and funny moments<br />
68
- dark riddle hack support and feedback</p>
69
- <ul>
70
- <li>Q: Is Dark Riddle hack safe to use?</li>
71
- <li>A: Dark Riddle hack is safe to use if you download it from a reliable source and scan it with an antivirus program before installing it. However, you should also be careful not to get banned from the game or get malware on your device by using the hack tool.</li>
72
- <li>Q: Is Dark Riddle hack free to use?</li>
73
- <li>A: Dark Riddle hack is free to use and does not require any payment or registration. However, you should also respect the developers of the game and support them by buying some in-app purchases if you can afford them.</li>
74
- <li>Q: Is Dark Riddle hack easy to use?</li>
75
- <li>A: Dark Riddle hack is easy to use and does not require any technical skills or knowledge. You just need to follow some simple steps to access the hack menu and choose the options you want.</li>
76
- <li>Q: Is Dark Riddle hack compatible with all devices?</li>
77
- <li>A: Dark Riddle hack is compatible with all devices that can run the game, such as Android and iOS devices. However, you should also make sure that your device has enough storage space and battery life to run the game and the hack tool smoothly.</li>
78
- <li>Q: Is Dark Riddle hack updated regularly?</li>
79
- <li>A: Dark Riddle hack is updated regularly to keep up with the latest version of the game and fix any bugs or errors. However, you should also check for updates frequently and download them as soon as they are available.</li>
80
- </ul></p> 197e85843d<br />
81
- <br />
82
- <br />
 
spaces/1phancelerku/anime-remove-background/Download 8 Ball Pool for Java and Play with Friends Online.md DELETED
@@ -1,95 +0,0 @@
1
-
2
- <h1>Download 8 Ball Pool for Java: A Guide for Pool Lovers</h1>
3
- <p>If you are a fan of pool games, you might have heard of <strong>8 Ball Pool</strong>, one of the most popular and addictive online pool games in the world. But did you know that you can also play it on your <strong>Java platform</strong>? In this article, we will show you how to download and play 8 Ball Pool on Java, as well as some tips and tricks to improve your game. Let's get started!</p>
4
- <h2>download 8 ball pool for java</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNSUQ">https://jinyurl.com/2uNSUQ</a></b></p><br /><br />
5
- <h2>What is 8 Ball Pool?</h2>
6
- <p>8 Ball Pool is an online pool game where you can compete with other players from around the world in various game modes, such as <em>PvP</em>, <em>Tournaments</em>, <em>9 Ball</em>, and more. You can also customize your cue and table, buy new items in the <em>Pool Shop</em>, and join clubs to chat with other players. The game is developed by <strong>Miniclip</strong>, a leading online gaming company, and has over <strong>500 million downloads</strong> on Google Play. It is also available on other platforms, such as iOS, Windows, and web browsers.</p>
7
- <h2>What is Java Platform?</h2>
8
- <p>Java platform is a suite of programs that facilitate developing and running programs written in the <strong>Java programming language</strong>. It includes an execution engine (called a <em>virtual machine</em>), a compiler, and a set of libraries. Java platform is independent of any particular operating system, which makes Java programs run identically on all of them. Java is a programming language and computing platform first released by <strong>Sun Microsystems</strong> in 1995. The Java Virtual Machine (JVM) is platform-dependent, meaning that different operating systems require different JVMs.</p>
9
- <h2>How to Download 8 Ball Pool for Java?</h2>
10
- <p>To download and play 8 Ball Pool on Java, you need to follow these steps:</p>
11
- <h3>Step 1: Check your system requirements and compatibility</h3>
12
- <p>Before you download the game, make sure that your device meets the minimum system requirements for running Java programs. You can check them here. Also, make sure that your device supports the <strong>Jar format</strong>, which is the file format for Java applications. You can check this by looking at the file extension of your downloaded files. If they end with <code>.jar. </code>, then they are Jar files and you can run them on your Java platform. If they end with <code>.jad</code>, then they are Jad files and you need to convert them to Jar files first. You can use online tools like this one to do that.</p>
13
- <h3>Step 2: Download and install Java Runtime Environment (JRE)</h3>
14
- <p>Java Runtime Environment (JRE) is a software package that provides the libraries and components needed to run Java applications. You can download the latest version of JRE from the official website. Choose the version that matches your operating system and device architecture. After downloading, follow the instructions to install JRE on your device.</p>
15
- <h3>Step 3: Download and install 8 Ball Pool from a trusted source</h3>
16
- <p>There are many websites that offer 8 Ball Pool for Java, but not all of them are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download 8 Ball Pool from a trusted source, such as the official Miniclip website. You can also use other reputable sources, such as Mobile9 or Phoneky. After downloading, locate the Jar file on your device and open it to install 8 Ball Pool on your Java platform.</p>
17
- <p>How to download 8 ball pool for java phones<br />
18
- Download 8 ball pool for java mobile free<br />
19
- Download 8 ball pool for java jar file<br />
20
- Download 8 ball pool for java touch screen<br />
21
- Download 8 ball pool for java game online<br />
22
- Download 8 ball pool for java from miniclip.com[^1^]<br />
23
- Download 8 ball pool for java github projects[^2^]<br />
24
- Download 8 ball pool for java mod apk<br />
25
- Download 8 ball pool for java hack tool<br />
26
- Download 8 ball pool for java cheat codes<br />
27
- Download 8 ball pool for java offline mode<br />
28
- Download 8 ball pool for java multiplayer support<br />
29
- Download 8 ball pool for java latest version<br />
30
- Download 8 ball pool for java update patch<br />
31
- Download 8 ball pool for java tutorial guide<br />
32
- Download 8 ball pool for java best tips and tricks<br />
33
- Download 8 ball pool for java reviews and ratings<br />
34
- Download 8 ball pool for java gameplay videos<br />
35
- Download 8 ball pool for java screenshots and wallpapers<br />
36
- Download 8 ball pool for java system requirements<br />
37
- Download 8 ball pool for java installation steps<br />
38
- Download 8 ball pool for java error fixes<br />
39
- Download 8 ball pool for java alternatives and similar games<br />
40
- Download 8 ball pool for java FAQs and forums<br />
41
- Download 8 ball pool for java customer service and support<br />
42
- Download 8 ball pool for java official website[^3^]<br />
43
- Download 8 ball pool for java free trial and demo<br />
44
- Download 8 ball pool for java premium features and benefits<br />
45
- Download 8 ball pool for java discount and coupon codes<br />
46
- Download 8 ball pool for java refund and cancellation policy<br />
47
- Download 8 ball pool for java terms and conditions<br />
48
- Download 8 ball pool for java privacy policy and security<br />
49
- Download 8 ball pool for java awards and achievements<br />
50
- Download 8 ball pool for java history and development<br />
51
- Download 8 ball pool for java team and developers<br />
52
- Download 8 ball pool for java testimonials and feedbacks<br />
53
- Download 8 ball pool for java news and updates<br />
54
- Download 8 ball pool for java events and tournaments<br />
55
- Download 8 ball pool for java challenges and missions<br />
56
- Download 8 ball pool for java rules and regulations<br />
57
- Download 8 ball pool for java strategies and techniques<br />
58
- Download 8 ball pool for java skills and levels<br />
59
- Download 8 ball pool for java coins and cash generator<br />
60
- Download 8 ball pool for java cues and tables collection<br />
61
- Download 8 ball pool for java friends and chat feature<br />
62
- Download 8 ball pool for java leaderboard and ranking system<br />
63
- Download 8 ball pool for java statistics and analytics<br />
64
- Download 8 ball pool for java customizations and settings</p>
65
- <h2>How to Play 8 Ball Pool on Java?</h2>
66
- <p>Playing 8 Ball Pool on Java is similar to playing it on other platforms, but there are some differences in the controls and interface. Here are the steps to play 8 Ball Pool on Java:</p>
67
- <h3>Step 1: Launch the game and sign in with your account</h3>
68
- <p>After installing 8 Ball Pool on your Java platform, launch the game by clicking on its icon. You will see the main menu with several options, such as <em>Play Online</em>, <em>Play Offline</em>, <em>Pool Shop</em>, <em>Settings</em>, and more. To play online, you need to sign in with your Miniclip account or create one if you don't have one. You can also sign in with your Facebook account if you want to sync your progress and access your friends list. To play offline, you don't need to sign in, but you will have limited features and modes.</p>
69
- <h3>Step 2: Choose a game mode and a table</h3>
70
- <p>After signing in, you can choose a game mode from the following options: <em>PvP</em>, where you can play against another player in real time; <em>Tournaments</em>, where you can join a bracket of players and compete for prizes; <em>9 Ball</em>, where you can play a different variation of pool with only nine balls; and <em>Practice</em>, where you can practice your shots without any pressure. You can also choose a table from different themes and styles, such as <em>London</em>, <em>Sydney</em>, <em>Moscow</em>, and more. Each table has a different entry fee and reward, so choose wisely according to your skill level and budget.</p>
71
- <h3>Step 3: Aim and shoot your cue ball</h3>
72
- <p>Once you enter a game, you will see the pool table with the balls arranged in a triangle. The game follows the standard rules of 8 ball pool, which means that you have to pot all the balls of your assigned group (solid or striped) before potting the black 8 ball. To aim your cue ball, use the arrow keys or the number keys on your keypad. To adjust the power of your shot, use the * key or the # key on your keypad. To shoot, press the OK button or the 5 key on your keypad. You can also use the spin feature by pressing the left soft key or the right soft key on your keypad. This will allow you to control the direction and speed of your cue ball after hitting another ball.</p>
73
- <h2>Tips and Tricks for Playing 8 Ball Pool on Java</h2>
74
- <p>To improve your game and have more fun playing 8 Ball Pool on Java, here are some tips and tricks that you should know:</p>
75
- <h3>Tip 1: Use the spin feature to control the cue ball</h3>
76
- <p>The spin feature is one of the most useful tools in 8 Ball Pool, as it can help you avoid scratches, get better positions, and make trick shots. To use it, press the left soft key or the right soft key on your keypad when aiming your cue ball. You will see a circle with four arrows around it, indicating the direction of the spin. You can choose from four types of spin: top spin, back spin, left spin, and right spin. Each type of spin has a different effect on the cue ball's movement and angle. For example, top spin will make the cue ball move forward after hitting another ball, while back spin will make it move backward. Left spin and right spin will make the cue ball curve to the left or right, respectively. You can use these effects to avoid obstacles, get closer to your target ball, or make difficult shots.</p>
77
- <h3>Tip 2: Practice your shots in offline mode</h3>
78
- <p>If you want to improve your skills and confidence in 8 Ball Pool, you should practice your shots in offline mode. Offline mode allows you to play against the computer or yourself without any internet connection or entry fee. You can choose from different difficulty levels and table themes, and you can also adjust the rules and settings of the game. Offline mode is a great way to learn the basics of the game, test your strategies, and have fun without any pressure or risk.</p>
79
- <h3>Tip 3: Challenge your friends and other players online</h3>
80
- <p>One of the best features of 8 Ball Pool is that you can challenge your friends and other players online. You can invite your friends from Facebook or Miniclip to play with you, or you can join a random match with someone from anywhere in the world. You can also chat with your opponent during the game, send them emojis, and add them as friends. Playing online is not only fun and exciting, but also rewarding and challenging. You can earn coins, items, trophies, and ranking points by winning matches, and you can also join clubs and tournaments to compete with other players and teams.</p>
81
- <h2>Conclusion</h2>
82
- <p>8 Ball Pool is a fantastic online pool game that you can play on your Java platform. It has amazing graphics, realistic physics, and addictive gameplay. You can download it from a trusted source, install it on your device, and enjoy playing it anytime and anywhere. You can also use some tips and tricks to improve your game and have more fun. Whether you are a beginner or a pro, 8 Ball Pool has something for everyone. So what are you waiting for? Download 8 Ball Pool for Java today and join the millions of pool lovers around the world!</p>
83
- <h2>FAQs</h2>
84
- <h3>Q1: Is 8 Ball Pool free to play on Java?</h3>
85
- <p>A1: Yes, 8 Ball Pool is free to play on Java. However, some features and items may require in-app purchases or real money.</p>
86
- <h3>Q2: How can I get more coins and items in 8 Ball Pool?</h3>
87
- <p>A2: You can get more coins and items in 8 Ball Pool by winning matches, completing missions, spinning the wheel, watching ads, or buying them with real money.</p>
88
- <h3>Q3: How can I improve my skills and ranking in 8 Ball Pool?</h3>
89
- <p>A3: You can improve your skills and ranking in 8 Ball Pool by practicing your shots in offline mode, learning from other players online, using the spin feature wisely, and joining clubs and tournaments.</p>
90
- <h3>Q4: What are the differences between 8 Ball Pool on Java and other platforms?</h3>
91
- <p>A4: The main differences between 8 Ball Pool on Java and other platforms are the controls and interface. On Java platform, you use the keypad to aim and shoot your cue ball, while on other platforms, you use the touch screen or the mouse. The interface on Java platform is also simpler and less cluttered than on other platforms.</p>
92
- <h3>Q5: What are the best sources to download 8 Ball Pool for Java?</h3>
93
- <p>A5: The best sources to download 8 Ball Pool for Java are the official Miniclip website, Mobile9, Phoneky, or any other reputable website that offers safe and reliable downloads.</p> 401be4b1e0<br />
94
- <br />
95
- <br />
 
spaces/1phancelerku/anime-remove-background/Free Download IGNOU Solved Assignment for M.Com 2023 All Subjects and Languages.md DELETED
@@ -1,88 +0,0 @@
1
-
2
- <h1>IGNOU Solved Assignment Free Download M.Com: A Complete Guide</h1>
3
- <p>If you are pursuing a Master of Commerce (M.Com) course from Indira Gandhi National Open University (IGNOU), you might be wondering how to get the solved assignments for your course. Solved assignments are important for completing your course and getting good marks in the exams. In this article, we will tell you everything you need to know about IGNOU solved assignment free download M.Com, including what is the M.Com course, why do you need solved assignments, and how to download them easily.</p>
4
- <h2>ignou solved assignment free download m.com</h2><br /><p><b><b>Download File</b> --->>> <a href="https://jinyurl.com/2uNQWO">https://jinyurl.com/2uNQWO</a></b></p><br /><br />
5
- <h2>What is IGNOU M.Com Course?</h2>
6
- <p>IGNOU M.Com course is a two-year postgraduate program that offers a comprehensive and advanced study of various aspects of commerce, such as accounting, finance, marketing, management, economics, and taxation. The course aims to develop the skills and knowledge of the students in the field of commerce and prepare them for various career opportunities in the public and private sectors.</p>
7
- <h3>Eligibility Criteria</h3>
8
- <p>To be eligible for admission to IGNOU M.Com course, you need to have a bachelor's degree or equivalent in any discipline from a recognized university. You also need to have at least 50% marks in aggregate or equivalent grade point average (GPA). However, there is a relaxation of 5% marks for SC/ST/OBC/PWD candidates.</p>
9
- <h3>Course Structure</h3>
10
- <p>The IGNOU M.Com course consists of 12 courses, out of which six are compulsory and six are elective. The compulsory courses cover the core subjects of commerce, such as business environment, financial management, marketing management, organizational behavior, and research methodology. The elective courses allow the students to choose from various specializations, such as accounting and finance, banking and insurance, business policy and corporate governance, international business operations, and management accounting and financial strategies.</p>
11
- <h3>Course Fee</h3>
12
- <p>The total fee for IGNOU M.Com course is Rs. 13,200/-, which is payable in two installments of Rs. 6,600/- each. The fee includes the registration fee, examination fee, study material fee, and other charges. The fee can be paid online through debit card/credit card/net banking or offline through demand draft/bank challan.</p>
13
- <h2>Why Do You Need IGNOU Solved Assignments?</h2>
14
- <p>IGNOU solved assignments are an essential part of your M.Com course. They are written assignments that you need to submit to your study center before the due date. They carry 30% weightage in your final marks and help you to improve your understanding of the course content.</p>
15
- <h3>Benefits of Solved Assignments</h3>
16
- <p>Some of the benefits of solved assignments are:</p>
17
- <p>ignou m.com solved assignment 2022-2023 pdf free download<br />
18
- ignou m.com first year solved assignment free download<br />
19
- ignou m.com second year solved assignment free download<br />
20
- ignou m.com ibo solved assignment free download<br />
21
- ignou m.com mco solved assignment free download<br />
22
- ignou m.com books with solved assignment free download<br />
23
- ignou m.com assignment solution free download<br />
24
- ignou m.com assignment answer key free download<br />
25
- ignou m.com assignment question paper free download<br />
26
- ignou m.com assignment submission date 2022-2023<br />
27
- ignou m.com assignment status 2022-2023<br />
28
- ignou m.com assignment marks 2022-2023<br />
29
- ignou m.com assignment grade card 2022-2023<br />
30
- ignou m.com assignment result 2022-2023<br />
31
- ignou m.com assignment online submission 2022-2023<br />
32
- ignou m.com assignment online payment 2022-2023<br />
33
- ignou m.com assignment online verification 2022-2023<br />
34
- ignou m.com assignment online correction 2022-2023<br />
35
- ignou m.com assignment online help 2022-2023<br />
36
- ignou m.com assignment online support 2022-2023<br />
37
- ignou m.com solved assignment sample free download<br />
38
- ignou m.com solved assignment format free download<br />
39
- ignou m.com solved assignment guide free download<br />
40
- ignou m.com solved assignment tips free download<br />
41
- ignou m.com solved assignment tricks free download<br />
42
- ignou m.com solved assignment best site free download<br />
43
- ignou m.com solved assignment latest edition free download<br />
44
- ignou m.com solved assignment updated version free download<br />
45
- ignou m.com solved assignment quality content free download<br />
46
- ignou m.com solved assignment high score free download</p>
47
- <ul>
48
- <li>They help you to revise the topics and concepts that you have learned in the study material.</li>
49
- <li>They enable you to practice your writing skills and express your views and opinions on various issues related to commerce.</li>
50
- <li>They provide you with feedback and suggestions from your tutors and peers on your performance and areas of improvement.</li>
51
- <li>They prepare you for the term-end examinations by giving you an idea of the type and pattern of questions that can be asked.</li>
52
- <li>They boost your confidence and motivation by rewarding you with marks and grades for your hard work.</li>
53
- </ul>
54
- <h3>How to Submit Solved Assignments</h3>
55
- <p>To submit your solved assignments, you need to follow these steps:</p>
56
- <ol>
57
- <li>Download the assignment questions from the official website of IGNOU or collect them from your study center.</li>
58
- <li>Solve the assignments by referring to the study material and using your own words and examples.</li>
59
- <li>Write your name, enrollment number, course code, course title, assignment code, study center code, and date on the first page of each assignment.</li>
60
- <li>Make sure that your handwriting is neat and legible and that you follow the word limit and format specified in the assignment guidelines.</li>
61
- <li>Attach a copy of the assignment submission form with each assignment and keep a copy of the assignments and the form for your reference.</li>
62
- <li>Submit your assignments to your study center coordinator before the last date of submission.</li>
63
- </ol>
64
- <h2>How to Download IGNOU Solved Assignments for M.Com?</h2>
65
- <p>If you are looking for IGNOU solved assignments for M.Com, you can download them for free from various online sources. However, you should be careful about the quality and authenticity of the solved assignments and use them only as a reference and not as a substitute for your own work. To download IGNOU solved assignments for M.Com, you can follow these steps:</p>
66
- <h3>Visit the Official Website of IGNOU</h3>
67
- <p>The first step is to visit the official website of IGNOU at <a href="">www.ignou.ac.in</a>. Here, you can find the latest updates and notifications regarding the M.Com course and the assignments. You can also access the study material and other resources for your course.</p>
68
- <h3>Select Your Course and Session</h3>
69
- <p>The next step is to select your course and session from the drop-down menu on the homepage. You will be redirected to a new page where you can see the list of courses offered by IGNOU. Click on the M.Com course and then choose your session (July or January). You will see the details of the course, such as the syllabus, admission procedure, evaluation scheme, etc.</p>
70
- <h3>Download the Solved Assignments in PDF Format</h3>
71
- <p>The final step is to download the solved assignments in PDF format from the links provided on the same page. You can find the solved assignments for both compulsory and elective courses for each session. You can also download the assignment questions and guidelines from here. You can save the solved assignments on your device or print them out for your convenience.</p>
72
- <h2>Conclusion</h2>
73
- <p>IGNOU M.Com course is a great option for those who want to pursue higher studies in commerce and enhance their career prospects. However, to complete the course successfully, you need to submit the solved assignments on time and with quality. You can download IGNOU solved assignment free download M.Com from various online sources, but you should use them only as a reference and not as a copy. You should also follow the instructions and guidelines given by IGNOU for writing and submitting your assignments. By doing so, you can improve your learning outcomes and achieve your academic goals.</p>
74
- <h2>FAQs</h2>
75
- <p>Here are some frequently asked questions about IGNOU solved assignment free download M.Com:</p>
76
- <h4>Q1: What is the last date of submission of IGNOU M.Com assignments?</h4>
77
- <p>A1: The last date of submission of IGNOU M.Com assignments depends on your session. For July session, it is 31st March of the next year. For January session, it is 30th September of the same year.</p>
78
- <h4>Q2: How many marks are required to pass IGNOU M.Com assignments?</h4>
79
- <p>A2: You need to score at least 40% marks in each assignment to pass it. The marks obtained in the assignments are added to your term-end examination marks to calculate your final grade.</p>
80
- <h4>Q3: How can I check my IGNOU M.Com assignment status?</h4>
81
- <p>A3: You can check your IGNOU M.Com assignment status online by visiting <a href="">https://admission.ignou.ac.in/changeadmdata/StatusAssignment.ASP</a>. Here, you need to enter your enrollment number, program code, and date of birth to view your assignment status.</p>
82
- <h4>Q4: Can I re-submit my IGNOU M.Com assignment if I am not satisfied with my marks?</h4>
83
- <p>A4: No, you cannot re-submit your IGNOU M.Com assignment once it is submitted. However, you can improve your marks by performing well in the term-end examinations.</p>
84
- <h4>Q5: Where can I get more information about IGNOU M.Com course and assignments?</h4>
85
- <p>A5: You can get more information about IGNOU M.Com course and assignments from <a href="">http://www.ignou.ac.in/ignou/aboutignou/school/soms/programmes/detail/164/2</a>. Here you can find the course objectives, outcomes, curriculum, faculty, and contact details. You can also download the prospectus and application form from here.</p>
86
- <p>I hope this article has helped you to understand IGNOU solved assignment free download M.Com better. If you have any queries or suggestions, please feel free to leave a comment below. Thank you for reading and happy learning!</p> 197e85843d<br />
87
- <br />
88
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/pndm/__init__.py DELETED
@@ -1,17 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # flake8: noqa
- from .pipeline_pndm import PNDMPipeline
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/prepare/download_glove.sh DELETED
@@ -1,9 +0,0 @@
- echo -e "Downloading glove (in use by the evaluators)"
- gdown --fuzzy https://drive.google.com/file/d/1bCeS6Sh_mLVTebxIgiUHgdPrroW06mb6/view?usp=sharing
- rm -rf glove
-
- unzip glove.zip
- echo -e "Cleaning\n"
- rm glove.zip
-
- echo -e "Downloading done!"
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/__init__.py DELETED
@@ -1,82 +0,0 @@
1
- import librosa
2
- import numpy as np
3
- import pyloudnorm as pyln
4
-
5
- from text_to_speech.utils.audio.vad import trim_long_silences
6
-
7
-
8
- def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
9
- '''compute right padding (final frame) or both sides padding (first and final frames)
10
- '''
11
- assert pad_sides in (1, 2)
12
- # return int(fsize // 2)
13
- pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
14
- if pad_sides == 1:
15
- return 0, pad
16
- else:
17
- return pad // 2, pad // 2 + pad % 2
18
-
19
-
20
- def amp_to_db(x):
21
- return 20 * np.log10(np.maximum(1e-5, x))
22
-
23
-
24
- def db_to_amp(x):
25
- return 10.0 ** (x * 0.05)
26
-
27
-
28
- def normalize(S, min_level_db):
29
- return (S - min_level_db) / -min_level_db
30
-
31
-
32
- def denormalize(D, min_level_db):
33
- return (D * -min_level_db) + min_level_db
34
-
35
-
36
- def librosa_wav2spec(wav_path,
37
- fft_size=1024,
38
- hop_size=256,
39
- win_length=1024,
40
- window="hann",
41
- num_mels=80,
42
- fmin=80,
43
- fmax=-1,
44
- eps=1e-6,
45
- sample_rate=22050,
46
- loud_norm=False,
47
- trim_long_sil=False):
48
- if isinstance(wav_path, str):
49
- if trim_long_sil:
50
- wav, _, _ = trim_long_silences(wav_path, sample_rate)
51
- else:
52
- wav, _ = librosa.core.load(wav_path, sr=sample_rate)
53
- else:
54
- wav = wav_path
55
-
56
- if loud_norm:
57
- meter = pyln.Meter(sample_rate) # create BS.1770 meter
58
- loudness = meter.integrated_loudness(wav)
59
- wav = pyln.normalize.loudness(wav, loudness, -22.0)
60
- if np.abs(wav).max() > 1:
61
- wav = wav / np.abs(wav).max()
62
-
63
- # get amplitude spectrogram
64
- x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
65
- win_length=win_length, window=window, pad_mode="constant")
66
- linear_spc = np.abs(x_stft) # (n_bins, T)
67
-
68
- # get mel basis
69
- fmin = 0 if fmin == -1 else fmin
70
- fmax = sample_rate / 2 if fmax == -1 else fmax
71
- mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax)
72
-
73
- # calculate mel spec
74
- mel = mel_basis @ linear_spc
75
- mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T)
76
- l_pad, r_pad = librosa_pad_lr(wav, fft_size, hop_size, 1)
77
- wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0)
78
- wav = wav[:mel.shape[1] * hop_size]
79
-
80
- # log linear spec
81
- linear_spc = np.log10(np.maximum(eps, linear_spc))
82
- return {'wav': wav, 'mel': mel.T, 'linear': linear_spc.T, 'mel_basis': mel_basis}
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/diffusionmodules/custom_openaimodel.py DELETED
@@ -1,368 +0,0 @@
1
- from abc import abstractmethod
2
- from functools import partial
3
- import math
4
- from typing import Iterable
5
-
6
- import numpy as np
7
- import torch as th
8
- import torch.nn as nn
9
- import torch.nn.functional as F
10
-
11
- from ldm.modules.diffusionmodules.util import (
12
- checkpoint,
13
- conv_nd,
14
- linear,
15
- avg_pool_nd,
16
- zero_module,
17
- normalization,
18
- timestep_embedding,
19
- )
20
- from ldm.modules.attention import SpatialTransformer
21
- from ldm.modules.diffusionmodules.openaimodel import convert_module_to_f16, convert_module_to_f32, AttentionPool2d, \
22
- TimestepBlock, TimestepEmbedSequential, Upsample, TransposedUpsample, Downsample, ResBlock, AttentionBlock, count_flops_attn, \
23
- QKVAttentionLegacy, QKVAttention
24
-
25
-
26
- class UNetModel(nn.Module):
27
- """
28
- The full UNet model with attention and timestep embedding.
29
- :param in_channels: channels in the input Tensor.
30
- :param model_channels: base channel count for the model.
31
- :param out_channels: channels in the output Tensor.
32
- :param num_res_blocks: number of residual blocks per downsample.
33
- :param attention_resolutions: a collection of downsample rates at which
34
- attention will take place. May be a set, list, or tuple.
35
- For example, if this contains 4, then at 4x downsampling, attention
36
- will be used.
37
- :param dropout: the dropout probability.
38
- :param channel_mult: channel multiplier for each level of the UNet.
39
- :param conv_resample: if True, use learned convolutions for upsampling and
40
- downsampling.
41
- :param dims: determines if the signal is 1D, 2D, or 3D.
42
- :param num_classes: if specified (as an int), then this model will be
43
- class-conditional with `num_classes` classes.
44
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
45
- :param num_heads: the number of attention heads in each attention layer.
46
- :param num_heads_channels: if specified, ignore num_heads and instead use
47
- a fixed channel width per attention head.
48
- :param num_heads_upsample: works with num_heads to set a different number
49
- of heads for upsampling. Deprecated.
50
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
51
- :param resblock_updown: use residual blocks for up/downsampling.
52
- :param use_new_attention_order: use a different attention pattern for potentially
53
- increased efficiency.
54
- """
55
-
56
- def __init__(
57
- self,
58
- image_size,
59
- in_channels,
60
- model_channels,
61
- out_channels,
62
- num_res_blocks,
63
- attention_resolutions,
64
- dropout=0,
65
- channel_mult=(1, 2, 4, 8),
66
- conv_resample=True,
67
- dims=2,
68
- num_classes=None,
69
- use_checkpoint=False,
70
- use_fp16=False,
71
- num_heads=-1,
72
- num_head_channels=-1,
73
- num_heads_upsample=-1,
74
- use_scale_shift_norm=False,
75
- resblock_updown=False,
76
- use_new_attention_order=False,
77
- use_spatial_transformer=False, # custom transformer support
78
- transformer_depth=1, # custom transformer support
79
- context_dim=None, # custom transformer support
80
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
81
- legacy=True,
82
- use_context_project=False, # custom text to audio support
83
- use_context_attn=True # custom text to audio support
84
- ):
85
- super().__init__()
86
- if use_spatial_transformer:
87
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
88
-
89
- if context_dim is not None and not use_context_project:
90
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
91
- from omegaconf.listconfig import ListConfig
92
- if type(context_dim) == ListConfig:
93
- context_dim = list(context_dim)
94
-
95
- if num_heads_upsample == -1:
96
- num_heads_upsample = num_heads
97
-
98
- if num_heads == -1:
99
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
100
-
101
- if num_head_channels == -1:
102
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
103
-
104
- self.image_size = image_size
105
- self.in_channels = in_channels
106
- self.model_channels = model_channels
107
- self.out_channels = out_channels
108
- self.num_res_blocks = num_res_blocks
109
- self.attention_resolutions = attention_resolutions
110
- self.dropout = dropout
111
- self.channel_mult = channel_mult
112
- self.conv_resample = conv_resample
113
- self.num_classes = num_classes
114
- self.use_checkpoint = use_checkpoint
115
- self.dtype = th.float16 if use_fp16 else th.float32
116
- self.num_heads = num_heads
117
- self.num_head_channels = num_head_channels
118
- self.num_heads_upsample = num_heads_upsample
119
- self.predict_codebook_ids = n_embed is not None
120
-
121
- time_embed_dim = model_channels * 4
122
- self.time_embed = nn.Sequential(
123
- linear(model_channels, time_embed_dim),
124
- nn.SiLU(),
125
- linear(time_embed_dim, time_embed_dim),
126
- )
127
-
128
- if self.num_classes is not None:
129
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
130
-
131
- self.input_blocks = nn.ModuleList(
132
- [
133
- TimestepEmbedSequential(
134
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
135
- )
136
- ]
137
- )
138
- self._feature_size = model_channels
139
- input_block_chans = [model_channels]
140
- ch = model_channels
141
- ds = 1
142
- for level, mult in enumerate(channel_mult):
143
- for _ in range(num_res_blocks):
144
- layers = [
145
- ResBlock(
146
- ch,
147
- time_embed_dim,
148
- dropout,
149
- out_channels=mult * model_channels,
150
- dims=dims,
151
- use_checkpoint=use_checkpoint,
152
- use_scale_shift_norm=use_scale_shift_norm,
153
- )
154
- ]
155
- ch = mult * model_channels
156
- if ds in attention_resolutions:
157
- if num_head_channels == -1:
158
- dim_head = ch // num_heads
159
- else:
160
- num_heads = ch // num_head_channels
161
- dim_head = num_head_channels
162
- if legacy:
163
- #num_heads = 1
164
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
165
- layers.append(
166
- AttentionBlock(
167
- ch,
168
- use_checkpoint=use_checkpoint,
169
- num_heads=num_heads,
170
- num_head_channels=dim_head,
171
- use_new_attention_order=use_new_attention_order,
172
-                         ) if not use_spatial_transformer else SpatialTransformer(
-                             ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-                         )
-                     )
-                 self.input_blocks.append(TimestepEmbedSequential(*layers))
-                 self._feature_size += ch
-                 input_block_chans.append(ch)
-             if level != len(channel_mult) - 1:
-                 out_ch = ch
-                 self.input_blocks.append(
-                     TimestepEmbedSequential(
-                         ResBlock(
-                             ch,
-                             time_embed_dim,
-                             dropout,
-                             out_channels=out_ch,
-                             dims=dims,
-                             use_checkpoint=use_checkpoint,
-                             use_scale_shift_norm=use_scale_shift_norm,
-                             down=True,
-                         )
-                         if resblock_updown
-                         else Downsample(
-                             ch, conv_resample, dims=dims, out_channels=out_ch
-                         )
-                     )
-                 )
-                 ch = out_ch
-                 input_block_chans.append(ch)
-                 ds *= 2
-                 self._feature_size += ch
-
-         if num_head_channels == -1:
-             dim_head = ch // num_heads
-         else:
-             num_heads = ch // num_head_channels
-             dim_head = num_head_channels
-         if legacy:
-             #num_heads = 1
-             dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-         self.middle_block = TimestepEmbedSequential(
-             ResBlock(
-                 ch,
-                 time_embed_dim,
-                 dropout,
-                 dims=dims,
-                 use_checkpoint=use_checkpoint,
-                 use_scale_shift_norm=use_scale_shift_norm,
-             ),
-             AttentionBlock(
-                 ch,
-                 use_checkpoint=use_checkpoint,
-                 num_heads=num_heads,
-                 num_head_channels=dim_head,
-                 use_new_attention_order=use_new_attention_order,
-             ) if not use_spatial_transformer else SpatialTransformer(
-                 ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-             ),
-             ResBlock(
-                 ch,
-                 time_embed_dim,
-                 dropout,
-                 dims=dims,
-                 use_checkpoint=use_checkpoint,
-                 use_scale_shift_norm=use_scale_shift_norm,
-             ),
-         )
-         self._feature_size += ch
-
-         self.output_blocks = nn.ModuleList([])
-         for level, mult in list(enumerate(channel_mult))[::-1]:
-             for i in range(num_res_blocks + 1):
-                 ich = input_block_chans.pop()
-                 layers = [
-                     ResBlock(
-                         ch + ich,
-                         time_embed_dim,
-                         dropout,
-                         out_channels=model_channels * mult,
-                         dims=dims,
-                         use_checkpoint=use_checkpoint,
-                         use_scale_shift_norm=use_scale_shift_norm,
-                     )
-                 ]
-                 ch = model_channels * mult
-                 if ds in attention_resolutions:
-                     if num_head_channels == -1:
-                         dim_head = ch // num_heads
-                     else:
-                         num_heads = ch // num_head_channels
-                         dim_head = num_head_channels
-                     if legacy:
-                         #num_heads = 1
-                         dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                     layers.append(
-                         AttentionBlock(
-                             ch,
-                             use_checkpoint=use_checkpoint,
-                             num_heads=num_heads_upsample,
-                             num_head_channels=dim_head,
-                             use_new_attention_order=use_new_attention_order,
-                         ) if not use_spatial_transformer else SpatialTransformer(
-                             ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-                         )
-                     )
-                 if level and i == num_res_blocks:
-                     out_ch = ch
-                     layers.append(
-                         ResBlock(
-                             ch,
-                             time_embed_dim,
-                             dropout,
-                             out_channels=out_ch,
-                             dims=dims,
-                             use_checkpoint=use_checkpoint,
-                             use_scale_shift_norm=use_scale_shift_norm,
-                             up=True,
-                         )
-                         if resblock_updown
-                         else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
-                     )
-                     ds //= 2
-                 self.output_blocks.append(TimestepEmbedSequential(*layers))
-                 self._feature_size += ch
-
-         self.out = nn.Sequential(
-             normalization(ch),
-             nn.SiLU(),
-             zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
-         )
-         if self.predict_codebook_ids:
-             self.id_predictor = nn.Sequential(
-                 normalization(ch),
-                 conv_nd(dims, model_channels, n_embed, 1),
-                 #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
-             )
-
-         self.use_context_project = use_context_project
-         if use_context_project:
-             self.context_project = linear(context_dim, time_embed_dim)
-         self.use_context_attn = use_context_attn
-
-
-     def convert_to_fp16(self):
-         """
-         Convert the torso of the model to float16.
-         """
-         self.input_blocks.apply(convert_module_to_f16)
-         self.middle_block.apply(convert_module_to_f16)
-         self.output_blocks.apply(convert_module_to_f16)
-
-     def convert_to_fp32(self):
-         """
-         Convert the torso of the model to float32.
-         """
-         self.input_blocks.apply(convert_module_to_f32)
-         self.middle_block.apply(convert_module_to_f32)
-         self.output_blocks.apply(convert_module_to_f32)
-
-     def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
-         """
-         Apply the model to an input batch.
-         :param x: an [N x C x ...] Tensor of inputs.
-         :param timesteps: a 1-D batch of timesteps.
-         :param context: conditioning plugged in via crossattn
-         :param y: an [N] Tensor of labels, if class-conditional.
-         :return: an [N x C x ...] Tensor of outputs.
-         """
-         assert (y is not None) == (
-             self.num_classes is not None
-         ), "must specify y if and only if the model is class-conditional"
-         hs = []
-         t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
-         emb = self.time_embed(t_emb)
-
-         if self.num_classes is not None:
-             assert y.shape == (x.shape[0],)
-             emb = emb + self.label_emb(y)
-
-         # For text-to-audio using global CLIP
-         if self.use_context_project:
-             context = self.context_project(context)
-             emb = emb + context.squeeze(1)
-
-         h = x.type(self.dtype)
-         for module in self.input_blocks:
-             h = module(h, emb, context if self.use_context_attn else None)
-             hs.append(h)
-         h = self.middle_block(h, emb, context if self.use_context_attn else None)
-         for module in self.output_blocks:
-             h = th.cat([h, hs.pop()], dim=1)
-             h = module(h, emb, context if self.use_context_attn else None)
-         h = h.type(x.dtype)
-         if self.predict_codebook_ids:
-             return self.id_predictor(h)
-         else:
-             return self.out(h)
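A minimal smoke-test sketch for the forward() contract shown above. The class name UNetModel, the constructor arguments, and the tensor shapes are illustrative assumptions (the constructor sits earlier in this deleted file); only the forward(x, timesteps, context, y) signature is taken from the listing.

    # Hedged usage sketch; constructor arguments and shapes are assumptions.
    import torch as th
    # from ldm.modules.diffusionmodules.custom_openaimodel import UNetModel  # assumed import path

    model = UNetModel(
        image_size=32, in_channels=4, model_channels=64, out_channels=4,
        num_res_blocks=2, attention_resolutions=(4,), channel_mult=(1, 2, 4),
        num_heads=4, use_spatial_transformer=True, transformer_depth=1,
        context_dim=512,
    )
    x = th.randn(2, 4, 32, 32)                 # [N x C x ...] inputs
    t = th.randint(0, 1000, (2,))              # 1-D batch of timesteps
    ctx = th.randn(2, 77, 512)                 # cross-attention conditioning
    out = model(x, timesteps=t, context=ctx)   # pass y only for class-conditional models
    print(out.shape)                           # same spatial shape as x, with out_channels channels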
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/lstm.py DELETED
@@ -1,25 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from torch import nn
-
-
- class StreamableLSTM(nn.Module):
-     """LSTM without worrying about the hidden state, nor the layout of the data.
-     Expects input as convolutional layout.
-     """
-     def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
-         super().__init__()
-         self.skip = skip
-         self.lstm = nn.LSTM(dimension, dimension, num_layers)
-
-     def forward(self, x):
-         x = x.permute(2, 0, 1)
-         y, _ = self.lstm(x)
-         if self.skip:
-             y = y + x
-         y = y.permute(1, 2, 0)
-         return y
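A short usage sketch of the module above, grounded in its docstring: input stays in the convolutional [batch, channels, time] layout and the shape is preserved.

    import torch
    # from audiocraft.modules.lstm import StreamableLSTM  # module defined above

    lstm = StreamableLSTM(dimension=64, num_layers=2, skip=True)
    x = torch.randn(8, 64, 100)   # [batch, channels, time], i.e. convolutional layout
    y = lstm(x)
    assert y.shape == x.shape     # layout and shape are preserved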
spaces/AgentVerse/agentVerse/agentverse/output_parser/__init__.py DELETED
@@ -1,5 +0,0 @@
- from agentverse.registry import Registry
-
- output_parser_registry = Registry(name="OutputParserRegistry")
-
- from .output_parser import *
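A hedged sketch of how a concrete parser might register itself with this registry. The decorator-style register() call is an assumption, since the Registry implementation is not shown in this file; only the registry object itself is defined above.

    # Assumed registration pattern for illustration only.
    from agentverse.output_parser import output_parser_registry

    @output_parser_registry.register("my_task")   # assumed decorator API
    class MyTaskParser:
        def parse(self, llm_output: str) -> dict:
            return {"content": llm_output.strip()}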
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogresscanvas/LineProgressCanvas.d.ts DELETED
@@ -1,2 +0,0 @@
- import LineProgressCanvas from "../../../plugins/lineprogresscanvas";
- export default LineProgressCanvas;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Factory.js DELETED
@@ -1,11 +0,0 @@
- import Perspective from './Perspective.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('perspective', function (gameObject, config) {
-     return new Perspective(gameObject, config);
- });
-
- SetValue(window, 'RexPlugins.UI.Perspective', Perspective);
-
- export default Perspective;
spaces/Alexxggs/ggvpnewen/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Ggvpnewen
- emoji: ⚡
- colorFrom: gray
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.9
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlgoveraAI/ocean-marketplace/README.md DELETED
@@ -1,46 +0,0 @@
- ---
- title: Ocean Marketplace
- emoji: 🧺
- colorFrom: indigo
- colorTo: yellow
- sdk: gradio
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio`, `streamlit`, or `static`
-
- `sdk_version`: _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
- Path is relative to the root of the repository.
-
- `models`: _List[string]_
- HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
- Will be parsed automatically from your code if not specified here.
-
- `datasets`: _List[string]_
- HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
- Will be parsed automatically from your code if not specified here.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
spaces/Alpaca233/SadTalker/scripts/download_models.sh DELETED
@@ -1,32 +0,0 @@
- mkdir ./checkpoints
-
- # legacy download links
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/auido2exp_00300-model.pth -O ./checkpoints/auido2exp_00300-model.pth
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/auido2pose_00140-model.pth -O ./checkpoints/auido2pose_00140-model.pth
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/epoch_20.pth -O ./checkpoints/epoch_20.pth
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/facevid2vid_00189-model.pth.tar -O ./checkpoints/facevid2vid_00189-model.pth.tar
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/shape_predictor_68_face_landmarks.dat -O ./checkpoints/shape_predictor_68_face_landmarks.dat
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/wav2lip.pth -O ./checkpoints/wav2lip.pth
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/mapping_00229-model.pth.tar -O ./checkpoints/mapping_00229-model.pth.tar
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/mapping_00109-model.pth.tar -O ./checkpoints/mapping_00109-model.pth.tar
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/hub.zip -O ./checkpoints/hub.zip
- # unzip -n ./checkpoints/hub.zip -d ./checkpoints/
-
-
- #### download the new links.
- wget -nc https://github.com/OpenTalker/SadTalker/releases/download/v0.0.2-rc/mapping_00109-model.pth.tar -O ./checkpoints/mapping_00109-model.pth.tar
- wget -nc https://github.com/OpenTalker/SadTalker/releases/download/v0.0.2-rc/mapping_00229-model.pth.tar -O ./checkpoints/mapping_00229-model.pth.tar
- wget -nc https://github.com/OpenTalker/SadTalker/releases/download/v0.0.2-rc/SadTalker_V0.0.2_256.safetensors -O ./checkpoints/SadTalker_V0.0.2_256.safetensors
- wget -nc https://github.com/OpenTalker/SadTalker/releases/download/v0.0.2-rc/SadTalker_V0.0.2_512.safetensors -O ./checkpoints/SadTalker_V0.0.2_512.safetensors
-
-
- # wget -nc https://github.com/Winfredy/SadTalker/releases/download/v0.0.2/BFM_Fitting.zip -O ./checkpoints/BFM_Fitting.zip
- # unzip -n ./checkpoints/BFM_Fitting.zip -d ./checkpoints/
-
- ### enhancer
- mkdir -p ./gfpgan/weights
- wget -nc https://github.com/xinntao/facexlib/releases/download/v0.1.0/alignment_WFLW_4HG.pth -O ./gfpgan/weights/alignment_WFLW_4HG.pth
- wget -nc https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth -O ./gfpgan/weights/detection_Resnet50_Final.pth
- wget -nc https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -O ./gfpgan/weights/GFPGANv1.4.pth
- wget -nc https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth -O ./gfpgan/weights/parsing_parsenet.pth
-
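For environments without wget, any of the downloads above can be reproduced in Python; the URL and destination below are taken verbatim from the script, and the existence check mirrors wget's -nc (no-clobber) behaviour.

    import os
    import urllib.request

    os.makedirs("./checkpoints", exist_ok=True)
    url = "https://github.com/OpenTalker/SadTalker/releases/download/v0.0.2-rc/SadTalker_V0.0.2_256.safetensors"
    dst = "./checkpoints/SadTalker_V0.0.2_256.safetensors"
    if not os.path.exists(dst):              # mirrors wget -nc: skip if the file already exists
        urllib.request.urlretrieve(url, dst)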
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/w_projector.py DELETED
@@ -1,161 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Project given image to the latent space of pretrained network pickle."""
10
-
11
- import copy
12
- import wandb
13
- import numpy as np
14
- import torch
15
- import torch.nn.functional as F
16
- from tqdm import tqdm
17
- from pti.pti_configs import global_config, hyperparameters
18
- from utils import log_utils
19
- import dnnlib
20
-
21
-
22
- def project(
23
- G,
24
- # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
25
- target: torch.Tensor,
26
- *,
27
- num_steps=1000,
28
- w_avg_samples=10000,
29
- initial_learning_rate=0.01,
30
- initial_noise_factor=0.05,
31
- lr_rampdown_length=0.25,
32
- lr_rampup_length=0.05,
33
- noise_ramp_length=0.75,
34
- regularize_noise_weight=1e5,
35
- verbose=False,
36
- device: torch.device,
37
- use_wandb=False,
38
- initial_w=None,
39
- image_log_step=global_config.image_rec_result_log_snapshot,
40
- w_name: str
41
- ):
42
- print(target.shape, G.img_channels, G.img_resolution, G.img_resolution//2)
43
- assert target.shape == (
44
- G.img_channels, G.img_resolution, G.img_resolution // 2)
45
-
46
- def logprint(*args):
47
- if verbose:
48
- print(*args)
49
-
50
- G = copy.deepcopy(G).eval().requires_grad_(
51
- False).to(device).float() # type: ignore
52
-
53
- # Compute w stats.
54
- logprint(
55
- f'Computing W midpoint and stddev using {w_avg_samples} samples...')
56
- z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
57
- w_samples = G.mapping(torch.from_numpy(
58
- z_samples).to(device), None) # [N, L, C]
59
- w_samples = w_samples[:, :1, :].cpu(
60
- ).numpy().astype(np.float32) # [N, 1, C]
61
- w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
62
- w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
63
- w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
64
-
65
- start_w = initial_w if initial_w is not None else w_avg
66
-
67
- # Setup noise inputs.
68
- noise_bufs = {name: buf for (
69
- name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}
70
-
71
- # Load VGG16 feature detector.
72
- url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
73
- with dnnlib.util.open_url(url) as f:
74
- vgg16 = torch.jit.load(f).eval().to(device)
75
-
76
- # Features for target image.
77
- target_images = target.unsqueeze(0).to(device).to(torch.float32)
78
- if target_images.shape[2] > 256:
79
- target_images = F.interpolate(
80
- target_images, size=(256, 256), mode='area')
81
- target_features = vgg16(
82
- target_images, resize_images=False, return_lpips=True)
83
-
84
- w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
85
- requires_grad=True) # pylint: disable=not-callable
86
- optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
87
- lr=hyperparameters.first_inv_lr)
88
-
89
- # Init noise.
90
- for buf in noise_bufs.values():
91
- buf[:] = torch.randn_like(buf)
92
- buf.requires_grad = True
93
-
94
- for step in range(num_steps):
95
-
96
- # Learning rate schedule.
97
- t = step / num_steps
98
- w_noise_scale = w_std * initial_noise_factor * \
99
- max(0.0, 1.0 - t / noise_ramp_length) ** 2
100
- lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
101
- lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
102
- lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
103
- lr = initial_learning_rate * lr_ramp
104
- for param_group in optimizer.param_groups:
105
- param_group['lr'] = lr
106
-
107
- # Synth images from opt_w.
108
- w_noise = torch.randn_like(w_opt) * w_noise_scale
109
- ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
110
- synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)
111
-
112
- # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
113
- synth_images = (synth_images + 1) * (255 / 2)
114
- if synth_images.shape[2] > 256:
115
- synth_images = F.interpolate(
116
- synth_images, size=(256, 256), mode='area')
117
-
118
- # Features for synth images.
119
- synth_features = vgg16(
120
- synth_images, resize_images=False, return_lpips=True)
121
- dist = (target_features - synth_features).square().sum()
122
-
123
- # Noise regularization.
124
- reg_loss = 0.0
125
- for v in noise_bufs.values():
126
- noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d()
127
- while True:
128
- reg_loss += (noise * torch.roll(noise,
129
- shifts=1, dims=3)).mean() ** 2
130
- reg_loss += (noise * torch.roll(noise,
131
- shifts=1, dims=2)).mean() ** 2
132
- if noise.shape[2] <= 8:
133
- break
134
- noise = F.avg_pool2d(noise, kernel_size=2)
135
- loss = dist + reg_loss * regularize_noise_weight
136
- if step % 10 == 0:
137
- print("project loss", step, loss.data)
138
- if step % image_log_step == 0:
139
- with torch.no_grad():
140
- if use_wandb:
141
- global_config.training_step += 1
142
- wandb.log({f'first projection _{w_name}': loss.detach(
143
- ).cpu()}, step=global_config.training_step)
144
- log_utils.log_image_from_w(w_opt.repeat(
145
- [1, G.mapping.num_ws, 1]), G, w_name)
146
-
147
- # Step
148
- optimizer.zero_grad(set_to_none=True)
149
- loss.backward()
150
- optimizer.step()
151
- logprint(
152
- f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
153
-
154
- # Normalize noise.
155
- with torch.no_grad():
156
- for buf in noise_bufs.values():
157
- buf -= buf.mean()
158
- buf *= buf.square().mean().rsqrt()
159
-
160
- del G
161
- return w_opt.repeat([1, 18, 1])
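A hedged usage sketch for project() above. The checkpoint name, the pickle-loading path, and the input image are illustrative (the repository's real loader goes through dnnlib/legacy utilities); the target layout of (channels, img_resolution, img_resolution // 2) follows the assert at the top of the function.

    # Illustrative only; paths and the checkpoint name are assumptions.
    import pickle
    import numpy as np
    import torch
    from PIL import Image

    device = torch.device('cuda')
    with open('stylegan_human_v2_1024.pkl', 'rb') as f:        # assumed checkpoint file
        G = pickle.load(f)['G_ema'].to(device)

    img = Image.open('person.png').convert('RGB').resize((G.img_resolution // 2, G.img_resolution))
    target = torch.from_numpy(np.array(img, dtype=np.uint8)).permute(2, 0, 1)  # [C, H, W], range [0, 255]

    w = project(G, target, num_steps=500, device=device, w_name='person', verbose=True)
    print(w.shape)   # [1, 18, C] latent in W+ space, as returned above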
spaces/Amrrs/DragGan-Inversion/torch_utils/training_stats.py DELETED
@@ -1,283 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Facilities for reporting and collecting training statistics across
10
- multiple processes and devices. The interface is designed to minimize
11
- synchronization overhead as well as the amount of boilerplate in user
12
- code."""
13
-
14
- import re
15
- import numpy as np
16
- import torch
17
- import dnnlib
18
-
19
- from . import misc
20
-
21
- # ----------------------------------------------------------------------------
22
-
23
- _num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
24
- # Data type to use for initial per-tensor reduction.
25
- _reduce_dtype = torch.float32
26
- _counter_dtype = torch.float64 # Data type to use for the internal counters.
27
- _rank = 0 # Rank of the current process.
28
- # Device to use for multiprocess communication. None = single-process.
29
- _sync_device = None
30
- _sync_called = False # Has _sync() been called yet?
31
- # Running counters on each device, updated by report(): name => device => torch.Tensor
32
- _counters = dict()
33
- # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
34
- _cumulative = dict()
35
-
36
- # ----------------------------------------------------------------------------
37
-
38
-
39
- def init_multiprocessing(rank, sync_device):
40
- r"""Initializes `torch_utils.training_stats` for collecting statistics
41
- across multiple processes.
42
-
43
- This function must be called after
44
- `torch.distributed.init_process_group()` and before `Collector.update()`.
45
- The call is not necessary if multi-process collection is not needed.
46
-
47
- Args:
48
- rank: Rank of the current process.
49
- sync_device: PyTorch device to use for inter-process
50
- communication, or None to disable multi-process
51
- collection. Typically `torch.device('cuda', rank)`.
52
- """
53
- global _rank, _sync_device
54
- assert not _sync_called
55
- _rank = rank
56
- _sync_device = sync_device
57
-
58
- # ----------------------------------------------------------------------------
59
-
60
-
61
- @misc.profiled_function
62
- def report(name, value):
63
- r"""Broadcasts the given set of scalars to all interested instances of
64
- `Collector`, across device and process boundaries.
65
-
66
- This function is expected to be extremely cheap and can be safely
67
- called from anywhere in the training loop, loss function, or inside a
68
- `torch.nn.Module`.
69
-
70
- Warning: The current implementation expects the set of unique names to
71
- be consistent across processes. Please make sure that `report()` is
72
- called at least once for each unique name by each process, and in the
73
- same order. If a given process has no scalars to broadcast, it can do
74
- `report(name, [])` (empty list).
75
-
76
- Args:
77
- name: Arbitrary string specifying the name of the statistic.
78
- Averages are accumulated separately for each unique name.
79
- value: Arbitrary set of scalars. Can be a list, tuple,
80
- NumPy array, PyTorch tensor, or Python scalar.
81
-
82
- Returns:
83
- The same `value` that was passed in.
84
- """
85
- if name not in _counters:
86
- _counters[name] = dict()
87
-
88
- elems = torch.as_tensor(value)
89
- if elems.numel() == 0:
90
- return value
91
-
92
- elems = elems.detach().flatten().to(_reduce_dtype)
93
- moments = torch.stack([
94
- torch.ones_like(elems).sum(),
95
- elems.sum(),
96
- elems.square().sum(),
97
- ])
98
- assert moments.ndim == 1 and moments.shape[0] == _num_moments
99
- moments = moments.to(_counter_dtype)
100
-
101
- device = moments.device
102
- if device not in _counters[name]:
103
- _counters[name][device] = torch.zeros_like(moments)
104
- _counters[name][device].add_(moments)
105
- return value
106
-
107
- # ----------------------------------------------------------------------------
108
-
109
-
110
- def report0(name, value):
111
- r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
112
- but ignores any scalars provided by the other processes.
113
- See `report()` for further details.
114
- """
115
- report(name, value if _rank == 0 else [])
116
- return value
117
-
118
- # ----------------------------------------------------------------------------
119
-
120
-
121
- class Collector:
122
- r"""Collects the scalars broadcasted by `report()` and `report0()` and
123
- computes their long-term averages (mean and standard deviation) over
124
- user-defined periods of time.
125
-
126
- The averages are first collected into internal counters that are not
127
- directly visible to the user. They are then copied to the user-visible
128
- state as a result of calling `update()` and can then be queried using
129
- `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
130
- internal counters for the next round, so that the user-visible state
131
- effectively reflects averages collected between the last two calls to
132
- `update()`.
133
-
134
- Args:
135
- regex: Regular expression defining which statistics to
136
- collect. The default is to collect everything.
137
- keep_previous: Whether to retain the previous averages if no
138
- scalars were collected on a given round
139
- (default: True).
140
- """
141
-
142
- def __init__(self, regex='.*', keep_previous=True):
143
- self._regex = re.compile(regex)
144
- self._keep_previous = keep_previous
145
- self._cumulative = dict()
146
- self._moments = dict()
147
- self.update()
148
- self._moments.clear()
149
-
150
- def names(self):
151
- r"""Returns the names of all statistics broadcasted so far that
152
- match the regular expression specified at construction time.
153
- """
154
- return [name for name in _counters if self._regex.fullmatch(name)]
155
-
156
- def update(self):
157
- r"""Copies current values of the internal counters to the
158
- user-visible state and resets them for the next round.
159
-
160
- If `keep_previous=True` was specified at construction time, the
161
- operation is skipped for statistics that have received no scalars
162
- since the last update, retaining their previous averages.
163
-
164
- This method performs a number of GPU-to-CPU transfers and one
165
- `torch.distributed.all_reduce()`. It is intended to be called
166
- periodically in the main training loop, typically once every
167
- N training steps.
168
- """
169
- if not self._keep_previous:
170
- self._moments.clear()
171
- for name, cumulative in _sync(self.names()):
172
- if name not in self._cumulative:
173
- self._cumulative[name] = torch.zeros(
174
- [_num_moments], dtype=_counter_dtype)
175
- delta = cumulative - self._cumulative[name]
176
- self._cumulative[name].copy_(cumulative)
177
- if float(delta[0]) != 0:
178
- self._moments[name] = delta
179
-
180
- def _get_delta(self, name):
181
- r"""Returns the raw moments that were accumulated for the given
182
- statistic between the last two calls to `update()`, or zero if
183
- no scalars were collected.
184
- """
185
- assert self._regex.fullmatch(name)
186
- if name not in self._moments:
187
- self._moments[name] = torch.zeros(
188
- [_num_moments], dtype=_counter_dtype)
189
- return self._moments[name]
190
-
191
- def num(self, name):
192
- r"""Returns the number of scalars that were accumulated for the given
193
- statistic between the last two calls to `update()`, or zero if
194
- no scalars were collected.
195
- """
196
- delta = self._get_delta(name)
197
- return int(delta[0])
198
-
199
- def mean(self, name):
200
- r"""Returns the mean of the scalars that were accumulated for the
201
- given statistic between the last two calls to `update()`, or NaN if
202
- no scalars were collected.
203
- """
204
- delta = self._get_delta(name)
205
- if int(delta[0]) == 0:
206
- return float('nan')
207
- return float(delta[1] / delta[0])
208
-
209
- def std(self, name):
210
- r"""Returns the standard deviation of the scalars that were
211
- accumulated for the given statistic between the last two calls to
212
- `update()`, or NaN if no scalars were collected.
213
- """
214
- delta = self._get_delta(name)
215
- if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
216
- return float('nan')
217
- if int(delta[0]) == 1:
218
- return float(0)
219
- mean = float(delta[1] / delta[0])
220
- raw_var = float(delta[2] / delta[0])
221
- return np.sqrt(max(raw_var - np.square(mean), 0))
222
-
223
- def as_dict(self):
224
- r"""Returns the averages accumulated between the last two calls to
225
- `update()` as an `dnnlib.EasyDict`. The contents are as follows:
226
-
227
- dnnlib.EasyDict(
228
- NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
229
- ...
230
- )
231
- """
232
- stats = dnnlib.EasyDict()
233
- for name in self.names():
234
- stats[name] = dnnlib.EasyDict(num=self.num(
235
- name), mean=self.mean(name), std=self.std(name))
236
- return stats
237
-
238
- def __getitem__(self, name):
239
- r"""Convenience getter.
240
- `collector[name]` is a synonym for `collector.mean(name)`.
241
- """
242
- return self.mean(name)
243
-
244
- # ----------------------------------------------------------------------------
245
-
246
-
247
- def _sync(names):
248
- r"""Synchronize the global cumulative counters across devices and
249
- processes. Called internally by `Collector.update()`.
250
- """
251
- if len(names) == 0:
252
- return []
253
- global _sync_called
254
- _sync_called = True
255
-
256
- # Collect deltas within current rank.
257
- deltas = []
258
- device = _sync_device if _sync_device is not None else torch.device('cpu')
259
- for name in names:
260
- delta = torch.zeros(
261
- [_num_moments], dtype=_counter_dtype, device=device)
262
- for counter in _counters[name].values():
263
- delta.add_(counter.to(device))
264
- counter.copy_(torch.zeros_like(counter))
265
- deltas.append(delta)
266
- deltas = torch.stack(deltas)
267
-
268
- # Sum deltas across ranks.
269
- if _sync_device is not None:
270
- torch.distributed.all_reduce(deltas)
271
-
272
- # Update cumulative values.
273
- deltas = deltas.cpu()
274
- for idx, name in enumerate(names):
275
- if name not in _cumulative:
276
- _cumulative[name] = torch.zeros(
277
- [_num_moments], dtype=_counter_dtype)
278
- _cumulative[name].add_(deltas[idx])
279
-
280
- # Return name-value pairs.
281
- return [(name, _cumulative[name]) for name in names]
282
-
283
- # ----------------------------------------------------------------------------
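A minimal single-process usage sketch of the facilities above, grounded in the report() and Collector docstrings; the import path follows this repository's torch_utils package layout.

    import torch
    from torch_utils import training_stats

    collector = training_stats.Collector(regex='Loss/.*')
    for step in range(100):
        fake_loss = torch.rand([8])                       # stand-in for per-sample losses
        training_stats.report('Loss/G/main', fake_loss)   # cheap, callable from anywhere
        if (step + 1) % 10 == 0:
            collector.update()                            # copy internal counters to user-visible state
            print(step, collector.num('Loss/G/main'),
                  collector.mean('Loss/G/main'), collector.std('Loss/G/main'))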
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/colossalai/train_dreambooth_colossalai.py DELETED
@@ -1,673 +0,0 @@
1
- import argparse
2
- import hashlib
3
- import math
4
- import os
5
- from pathlib import Path
6
-
7
- import colossalai
8
- import torch
9
- import torch.nn.functional as F
10
- import torch.utils.checkpoint
11
- from colossalai.context.parallel_mode import ParallelMode
12
- from colossalai.core import global_context as gpc
13
- from colossalai.logging import disable_existing_loggers, get_dist_logger
14
- from colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer
15
- from colossalai.nn.parallel.utils import get_static_torch_model
16
- from colossalai.utils import get_current_device
17
- from colossalai.utils.model.colo_init_context import ColoInitContext
18
- from huggingface_hub import create_repo, upload_folder
19
- from PIL import Image
20
- from torch.utils.data import Dataset
21
- from torchvision import transforms
22
- from tqdm.auto import tqdm
23
- from transformers import AutoTokenizer, PretrainedConfig
24
-
25
- from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
26
- from diffusers.optimization import get_scheduler
27
-
28
-
29
- disable_existing_loggers()
30
- logger = get_dist_logger()
31
-
32
-
33
- def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str):
34
- text_encoder_config = PretrainedConfig.from_pretrained(
35
- pretrained_model_name_or_path,
36
- subfolder="text_encoder",
37
- revision=args.revision,
38
- )
39
- model_class = text_encoder_config.architectures[0]
40
-
41
- if model_class == "CLIPTextModel":
42
- from transformers import CLIPTextModel
43
-
44
- return CLIPTextModel
45
- elif model_class == "RobertaSeriesModelWithTransformation":
46
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
47
-
48
- return RobertaSeriesModelWithTransformation
49
- else:
50
- raise ValueError(f"{model_class} is not supported.")
51
-
52
-
53
- def parse_args(input_args=None):
54
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
55
- parser.add_argument(
56
- "--pretrained_model_name_or_path",
57
- type=str,
58
- default=None,
59
- required=True,
60
- help="Path to pretrained model or model identifier from huggingface.co/models.",
61
- )
62
- parser.add_argument(
63
- "--revision",
64
- type=str,
65
- default=None,
66
- required=False,
67
- help="Revision of pretrained model identifier from huggingface.co/models.",
68
- )
69
- parser.add_argument(
70
- "--tokenizer_name",
71
- type=str,
72
- default=None,
73
- help="Pretrained tokenizer name or path if not the same as model_name",
74
- )
75
- parser.add_argument(
76
- "--instance_data_dir",
77
- type=str,
78
- default=None,
79
- required=True,
80
- help="A folder containing the training data of instance images.",
81
- )
82
- parser.add_argument(
83
- "--class_data_dir",
84
- type=str,
85
- default=None,
86
- required=False,
87
- help="A folder containing the training data of class images.",
88
- )
89
- parser.add_argument(
90
- "--instance_prompt",
91
- type=str,
92
- default="a photo of sks dog",
93
- required=False,
94
- help="The prompt with identifier specifying the instance",
95
- )
96
- parser.add_argument(
97
- "--class_prompt",
98
- type=str,
99
- default=None,
100
- help="The prompt to specify images in the same class as provided instance images.",
101
- )
102
- parser.add_argument(
103
- "--with_prior_preservation",
104
- default=False,
105
- action="store_true",
106
- help="Flag to add prior preservation loss.",
107
- )
108
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
109
- parser.add_argument(
110
- "--num_class_images",
111
- type=int,
112
- default=100,
113
- help=(
114
- "Minimal class images for prior preservation loss. If there are not enough images already present in"
115
- " class_data_dir, additional images will be sampled with class_prompt."
116
- ),
117
- )
118
- parser.add_argument(
119
- "--output_dir",
120
- type=str,
121
- default="text-inversion-model",
122
- help="The output directory where the model predictions and checkpoints will be written.",
123
- )
124
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
125
- parser.add_argument(
126
- "--resolution",
127
- type=int,
128
- default=512,
129
- help=(
130
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
131
- " resolution"
132
- ),
133
- )
134
- parser.add_argument(
135
- "--placement",
136
- type=str,
137
- default="cpu",
138
- help="Placement Policy for Gemini. Valid when using colossalai as dist plan.",
139
- )
140
- parser.add_argument(
141
- "--center_crop",
142
- default=False,
143
- action="store_true",
144
- help=(
145
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
146
- " cropped. The images will be resized to the resolution first before cropping."
147
- ),
148
- )
149
- parser.add_argument(
150
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
151
- )
152
- parser.add_argument(
153
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
154
- )
155
- parser.add_argument("--num_train_epochs", type=int, default=1)
156
- parser.add_argument(
157
- "--max_train_steps",
158
- type=int,
159
- default=None,
160
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
161
- )
162
- parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
163
- parser.add_argument(
164
- "--gradient_checkpointing",
165
- action="store_true",
166
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
167
- )
168
- parser.add_argument(
169
- "--learning_rate",
170
- type=float,
171
- default=5e-6,
172
- help="Initial learning rate (after the potential warmup period) to use.",
173
- )
174
- parser.add_argument(
175
- "--scale_lr",
176
- action="store_true",
177
- default=False,
178
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
179
- )
180
- parser.add_argument(
181
- "--lr_scheduler",
182
- type=str,
183
- default="constant",
184
- help=(
185
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
186
- ' "constant", "constant_with_warmup"]'
187
- ),
188
- )
189
- parser.add_argument(
190
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
191
- )
192
- parser.add_argument(
193
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
194
- )
195
-
196
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
197
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
198
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
199
- parser.add_argument(
200
- "--hub_model_id",
201
- type=str,
202
- default=None,
203
- help="The name of the repository to keep in sync with the local `output_dir`.",
204
- )
205
- parser.add_argument(
206
- "--logging_dir",
207
- type=str,
208
- default="logs",
209
- help=(
210
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
211
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
212
- ),
213
- )
214
- parser.add_argument(
215
- "--mixed_precision",
216
- type=str,
217
- default=None,
218
- choices=["no", "fp16", "bf16"],
219
- help=(
220
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
221
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
222
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
223
- ),
224
- )
225
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
226
-
227
- if input_args is not None:
228
- args = parser.parse_args(input_args)
229
- else:
230
- args = parser.parse_args()
231
-
232
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
233
- if env_local_rank != -1 and env_local_rank != args.local_rank:
234
- args.local_rank = env_local_rank
235
-
236
- if args.with_prior_preservation:
237
- if args.class_data_dir is None:
238
- raise ValueError("You must specify a data directory for class images.")
239
- if args.class_prompt is None:
240
- raise ValueError("You must specify prompt for class images.")
241
- else:
242
- if args.class_data_dir is not None:
243
- logger.warning("You need not use --class_data_dir without --with_prior_preservation.")
244
- if args.class_prompt is not None:
245
- logger.warning("You need not use --class_prompt without --with_prior_preservation.")
246
-
247
- return args
248
-
249
-
250
- class DreamBoothDataset(Dataset):
251
- """
252
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
253
- It pre-processes the images and the tokenizes prompts.
254
- """
255
-
256
- def __init__(
257
- self,
258
- instance_data_root,
259
- instance_prompt,
260
- tokenizer,
261
- class_data_root=None,
262
- class_prompt=None,
263
- size=512,
264
- center_crop=False,
265
- ):
266
- self.size = size
267
- self.center_crop = center_crop
268
- self.tokenizer = tokenizer
269
-
270
- self.instance_data_root = Path(instance_data_root)
271
- if not self.instance_data_root.exists():
272
- raise ValueError("Instance images root doesn't exists.")
273
-
274
- self.instance_images_path = list(Path(instance_data_root).iterdir())
275
- self.num_instance_images = len(self.instance_images_path)
276
- self.instance_prompt = instance_prompt
277
- self._length = self.num_instance_images
278
-
279
- if class_data_root is not None:
280
- self.class_data_root = Path(class_data_root)
281
- self.class_data_root.mkdir(parents=True, exist_ok=True)
282
- self.class_images_path = list(self.class_data_root.iterdir())
283
- self.num_class_images = len(self.class_images_path)
284
- self._length = max(self.num_class_images, self.num_instance_images)
285
- self.class_prompt = class_prompt
286
- else:
287
- self.class_data_root = None
288
-
289
- self.image_transforms = transforms.Compose(
290
- [
291
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
292
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
293
- transforms.ToTensor(),
294
- transforms.Normalize([0.5], [0.5]),
295
- ]
296
- )
297
-
298
- def __len__(self):
299
- return self._length
300
-
301
- def __getitem__(self, index):
302
- example = {}
303
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
304
- if not instance_image.mode == "RGB":
305
- instance_image = instance_image.convert("RGB")
306
- example["instance_images"] = self.image_transforms(instance_image)
307
- example["instance_prompt_ids"] = self.tokenizer(
308
- self.instance_prompt,
309
- padding="do_not_pad",
310
- truncation=True,
311
- max_length=self.tokenizer.model_max_length,
312
- ).input_ids
313
-
314
- if self.class_data_root:
315
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
316
- if not class_image.mode == "RGB":
317
- class_image = class_image.convert("RGB")
318
- example["class_images"] = self.image_transforms(class_image)
319
- example["class_prompt_ids"] = self.tokenizer(
320
- self.class_prompt,
321
- padding="do_not_pad",
322
- truncation=True,
323
- max_length=self.tokenizer.model_max_length,
324
- ).input_ids
325
-
326
- return example
327
-
328
-
329
- class PromptDataset(Dataset):
330
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
331
-
332
- def __init__(self, prompt, num_samples):
333
- self.prompt = prompt
334
- self.num_samples = num_samples
335
-
336
- def __len__(self):
337
- return self.num_samples
338
-
339
- def __getitem__(self, index):
340
- example = {}
341
- example["prompt"] = self.prompt
342
- example["index"] = index
343
- return example
344
-
345
-
346
- # Gemini + ZeRO DDP
347
- def gemini_zero_dpp(model: torch.nn.Module, placememt_policy: str = "auto"):
348
- from colossalai.nn.parallel import GeminiDDP
349
-
350
- model = GeminiDDP(
351
- model, device=get_current_device(), placement_policy=placememt_policy, pin_memory=True, search_range_mb=64
352
- )
353
- return model
354
-
355
-
356
- def main(args):
357
- if args.seed is None:
358
- colossalai.launch_from_torch(config={})
359
- else:
360
- colossalai.launch_from_torch(config={}, seed=args.seed)
361
-
362
- local_rank = gpc.get_local_rank(ParallelMode.DATA)
363
- world_size = gpc.get_world_size(ParallelMode.DATA)
364
-
365
- if args.with_prior_preservation:
366
- class_images_dir = Path(args.class_data_dir)
367
- if not class_images_dir.exists():
368
- class_images_dir.mkdir(parents=True)
369
- cur_class_images = len(list(class_images_dir.iterdir()))
370
-
371
- if cur_class_images < args.num_class_images:
372
- torch_dtype = torch.float16 if get_current_device() == "cuda" else torch.float32
373
- pipeline = DiffusionPipeline.from_pretrained(
374
- args.pretrained_model_name_or_path,
375
- torch_dtype=torch_dtype,
376
- safety_checker=None,
377
- revision=args.revision,
378
- )
379
- pipeline.set_progress_bar_config(disable=True)
380
-
381
- num_new_images = args.num_class_images - cur_class_images
382
- logger.info(f"Number of class images to sample: {num_new_images}.")
383
-
384
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
385
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
386
-
387
- pipeline.to(get_current_device())
388
-
389
- for example in tqdm(
390
- sample_dataloader,
391
- desc="Generating class images",
392
- disable=not local_rank == 0,
393
- ):
394
- images = pipeline(example["prompt"]).images
395
-
396
- for i, image in enumerate(images):
397
- hash_image = hashlib.sha1(image.tobytes()).hexdigest()
398
- image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
399
- image.save(image_filename)
400
-
401
- del pipeline
402
-
403
- # Handle the repository creation
404
- if local_rank == 0:
405
- if args.output_dir is not None:
406
- os.makedirs(args.output_dir, exist_ok=True)
407
-
408
- if args.push_to_hub:
409
- repo_id = create_repo(
410
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
411
- ).repo_id
412
-
413
- # Load the tokenizer
414
- if args.tokenizer_name:
415
- logger.info(f"Loading tokenizer from {args.tokenizer_name}", ranks=[0])
416
- tokenizer = AutoTokenizer.from_pretrained(
417
- args.tokenizer_name,
418
- revision=args.revision,
419
- use_fast=False,
420
- )
421
- elif args.pretrained_model_name_or_path:
422
- logger.info("Loading tokenizer from pretrained model", ranks=[0])
423
- tokenizer = AutoTokenizer.from_pretrained(
424
- args.pretrained_model_name_or_path,
425
- subfolder="tokenizer",
426
- revision=args.revision,
427
- use_fast=False,
428
- )
429
- # import correct text encoder class
430
- text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path)
431
-
432
- # Load models and create wrapper for stable diffusion
433
-
434
- logger.info(f"Loading text_encoder from {args.pretrained_model_name_or_path}", ranks=[0])
435
-
436
- text_encoder = text_encoder_cls.from_pretrained(
437
- args.pretrained_model_name_or_path,
438
- subfolder="text_encoder",
439
- revision=args.revision,
440
- )
441
-
442
- logger.info(f"Loading AutoencoderKL from {args.pretrained_model_name_or_path}", ranks=[0])
443
- vae = AutoencoderKL.from_pretrained(
444
- args.pretrained_model_name_or_path,
445
- subfolder="vae",
446
- revision=args.revision,
447
- )
448
-
449
- logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0])
450
- with ColoInitContext(device=get_current_device()):
451
- unet = UNet2DConditionModel.from_pretrained(
452
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, low_cpu_mem_usage=False
453
- )
454
-
455
- vae.requires_grad_(False)
456
- text_encoder.requires_grad_(False)
457
-
458
- if args.gradient_checkpointing:
459
- unet.enable_gradient_checkpointing()
460
-
461
- if args.scale_lr:
462
- args.learning_rate = args.learning_rate * args.train_batch_size * world_size
463
-
464
- unet = gemini_zero_dpp(unet, args.placement)
465
-
466
- # config optimizer for colossalai zero
467
- optimizer = GeminiAdamOptimizer(
468
- unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm
469
- )
470
-
471
- # load noise_scheduler
472
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
473
-
474
- # prepare dataset
475
- logger.info(f"Prepare dataset from {args.instance_data_dir}", ranks=[0])
476
- train_dataset = DreamBoothDataset(
477
- instance_data_root=args.instance_data_dir,
478
- instance_prompt=args.instance_prompt,
479
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
480
- class_prompt=args.class_prompt,
481
- tokenizer=tokenizer,
482
- size=args.resolution,
483
- center_crop=args.center_crop,
484
- )
485
-
486
- def collate_fn(examples):
487
- input_ids = [example["instance_prompt_ids"] for example in examples]
488
- pixel_values = [example["instance_images"] for example in examples]
489
-
490
- # Concat class and instance examples for prior preservation.
491
- # We do this to avoid doing two forward passes.
492
- if args.with_prior_preservation:
493
- input_ids += [example["class_prompt_ids"] for example in examples]
494
- pixel_values += [example["class_images"] for example in examples]
495
-
496
- pixel_values = torch.stack(pixel_values)
497
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
498
-
499
- input_ids = tokenizer.pad(
500
- {"input_ids": input_ids},
501
- padding="max_length",
502
- max_length=tokenizer.model_max_length,
503
- return_tensors="pt",
504
- ).input_ids
505
-
506
- batch = {
507
- "input_ids": input_ids,
508
- "pixel_values": pixel_values,
509
- }
510
- return batch
511
-
512
- train_dataloader = torch.utils.data.DataLoader(
513
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=1
514
- )
515
-
516
- # Scheduler and math around the number of training steps.
517
- overrode_max_train_steps = False
518
- num_update_steps_per_epoch = math.ceil(len(train_dataloader))
519
- if args.max_train_steps is None:
520
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
521
- overrode_max_train_steps = True
522
-
523
- lr_scheduler = get_scheduler(
524
- args.lr_scheduler,
525
- optimizer=optimizer,
526
- num_warmup_steps=args.lr_warmup_steps,
527
- num_training_steps=args.max_train_steps,
528
- )
529
- weight_dtype = torch.float32
530
- if args.mixed_precision == "fp16":
531
- weight_dtype = torch.float16
532
- elif args.mixed_precision == "bf16":
533
- weight_dtype = torch.bfloat16
534
-
535
- # Move text_encode and vae to gpu.
536
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
537
- # as these models are only used for inference, keeping weights in full precision is not required.
538
- vae.to(get_current_device(), dtype=weight_dtype)
539
- text_encoder.to(get_current_device(), dtype=weight_dtype)
540
-
541
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
542
- num_update_steps_per_epoch = math.ceil(len(train_dataloader))
543
- if overrode_max_train_steps:
544
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
545
- # Afterwards we recalculate our number of training epochs
546
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
547
-
548
- # Train!
549
- total_batch_size = args.train_batch_size * world_size
550
-
551
- logger.info("***** Running training *****", ranks=[0])
552
- logger.info(f" Num examples = {len(train_dataset)}", ranks=[0])
553
- logger.info(f" Num batches each epoch = {len(train_dataloader)}", ranks=[0])
554
- logger.info(f" Num Epochs = {args.num_train_epochs}", ranks=[0])
555
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}", ranks=[0])
556
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}", ranks=[0])
557
- logger.info(f" Total optimization steps = {args.max_train_steps}", ranks=[0])
558
-
559
- # Only show the progress bar once on each machine.
560
- progress_bar = tqdm(range(args.max_train_steps), disable=not local_rank == 0)
561
- progress_bar.set_description("Steps")
562
- global_step = 0
563
-
564
- torch.cuda.synchronize()
565
- for epoch in range(args.num_train_epochs):
566
- unet.train()
567
- for step, batch in enumerate(train_dataloader):
568
- torch.cuda.reset_peak_memory_stats()
569
- # Move batch to gpu
570
- for key, value in batch.items():
571
- batch[key] = value.to(get_current_device(), non_blocking=True)
572
-
573
- # Convert images to latent space
574
- optimizer.zero_grad()
575
-
576
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
577
- latents = latents * 0.18215
578
-
579
- # Sample noise that we'll add to the latents
580
- noise = torch.randn_like(latents)
581
- bsz = latents.shape[0]
582
- # Sample a random timestep for each image
583
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
584
- timesteps = timesteps.long()
585
-
586
- # Add noise to the latents according to the noise magnitude at each timestep
587
- # (this is the forward diffusion process)
588
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
589
-
590
- # Get the text embedding for conditioning
591
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
592
-
593
- # Predict the noise residual
594
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
595
-
596
- # Get the target for loss depending on the prediction type
597
- if noise_scheduler.config.prediction_type == "epsilon":
598
- target = noise
599
- elif noise_scheduler.config.prediction_type == "v_prediction":
600
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
601
- else:
602
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
603
-
604
- if args.with_prior_preservation:
605
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
606
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
607
- target, target_prior = torch.chunk(target, 2, dim=0)
608
-
609
- # Compute instance loss
610
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
611
-
612
- # Compute prior loss
613
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
614
-
615
- # Add the prior loss to the instance loss.
616
- loss = loss + args.prior_loss_weight * prior_loss
617
- else:
618
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
619
-
620
- optimizer.backward(loss)
621
-
622
- optimizer.step()
623
- lr_scheduler.step()
624
- logger.info(f"max GPU_mem cost is {torch.cuda.max_memory_allocated()/2**20} MB", ranks=[0])
625
- # Checks if the accelerator has performed an optimization step behind the scenes
626
- progress_bar.update(1)
627
- global_step += 1
628
- logs = {
629
- "loss": loss.detach().item(),
630
- "lr": optimizer.param_groups[0]["lr"],
631
- } # lr_scheduler.get_last_lr()[0]}
632
- progress_bar.set_postfix(**logs)
633
-
634
- if global_step % args.save_steps == 0:
635
- torch.cuda.synchronize()
636
- torch_unet = get_static_torch_model(unet)
637
- if local_rank == 0:
638
- pipeline = DiffusionPipeline.from_pretrained(
639
- args.pretrained_model_name_or_path,
640
- unet=torch_unet,
641
- revision=args.revision,
642
- )
643
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
644
- pipeline.save_pretrained(save_path)
645
- logger.info(f"Saving model checkpoint to {save_path}", ranks=[0])
646
- if global_step >= args.max_train_steps:
647
- break
648
-
649
- torch.cuda.synchronize()
650
- unet = get_static_torch_model(unet)
651
-
652
- if local_rank == 0:
653
- pipeline = DiffusionPipeline.from_pretrained(
654
- args.pretrained_model_name_or_path,
655
- unet=unet,
656
- revision=args.revision,
657
- )
658
-
659
- pipeline.save_pretrained(args.output_dir)
660
- logger.info(f"Saving model checkpoint to {args.output_dir}", ranks=[0])
661
-
662
- if args.push_to_hub:
663
- upload_folder(
664
- repo_id=repo_id,
665
- folder_path=args.output_dir,
666
- commit_message="End of training",
667
- ignore_patterns=["step_*", "epoch_*"],
668
- )
669
-
670
-
671
- if __name__ == "__main__":
672
- args = parse_args()
673
- main(args)
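The script above is normally launched with torchrun (colossalai.launch_from_torch expects the usual distributed environment variables), but parse_args() also accepts an explicit argument list, so a hedged programmatic invocation looks like the sketch below; the model id and paths are placeholders, not values from this repository.

    # Hedged sketch; paths and the pretrained model id are illustrative.
    args = parse_args([
        "--pretrained_model_name_or_path", "runwayml/stable-diffusion-v1-5",
        "--instance_data_dir", "./instance_images",
        "--instance_prompt", "a photo of sks dog",
        "--output_dir", "./dreambooth-colossalai-out",
        "--resolution", "512",
        "--train_batch_size", "1",
        "--max_train_steps", "400",
    ])
    main(args)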
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_unclip_txt2img_to_image_variation.py DELETED
@@ -1,41 +0,0 @@
- import argparse
-
- from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
-
- from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
-
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
-
-     parser.add_argument(
-         "--txt2img_unclip",
-         default="kakaobrain/karlo-v1-alpha",
-         type=str,
-         required=False,
-         help="The pretrained txt2img unclip.",
-     )
-
-     args = parser.parse_args()
-
-     txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
-
-     feature_extractor = CLIPImageProcessor()
-     image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
-
-     img2img = UnCLIPImageVariationPipeline(
-         decoder=txt2img.decoder,
-         text_encoder=txt2img.text_encoder,
-         tokenizer=txt2img.tokenizer,
-         text_proj=txt2img.text_proj,
-         feature_extractor=feature_extractor,
-         image_encoder=image_encoder,
-         super_res_first=txt2img.super_res_first,
-         super_res_last=txt2img.super_res_last,
-         decoder_scheduler=txt2img.decoder_scheduler,
-         super_res_scheduler=txt2img.super_res_scheduler,
-     )
-
-     img2img.save_pretrained(args.dump_path)
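After running the conversion above, the dumped pipeline can be loaded for image variation; the dump path and the sample image in this sketch are illustrative.

    import torch
    from PIL import Image
    from diffusers import UnCLIPImageVariationPipeline

    pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "./karlo-image-variation",          # the --dump_path used above (placeholder)
        torch_dtype=torch.float16,
    ).to("cuda")

    init_image = Image.open("input.png").convert("RGB")
    images = pipe(image=init_image, num_images_per_prompt=2).images
    images[0].save("variation_0.png")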
spaces/Andy1621/UniFormerV2_mit_demo/uniformerv2.py DELETED
@@ -1,510 +0,0 @@
1
- #!/usr/bin/env python
2
- import os
3
- from collections import OrderedDict
4
-
5
- from timm.models.layers import DropPath
6
- import torch
7
- from torch import nn
8
- from torch.nn import MultiheadAttention
9
- import torch.nn.functional as F
10
- import torch.utils.checkpoint as checkpoint
11
-
12
-
13
- MODEL_PATH = './'
14
- _MODELS = {
15
- "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
16
- "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
17
- "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
18
- }
19
-
20
-
21
- class LayerNorm(nn.LayerNorm):
22
- """Subclass torch's LayerNorm to handle fp16."""
23
-
24
- def forward(self, x):
25
- orig_type = x.dtype
26
- ret = super().forward(x.type(torch.float32))
27
- return ret.type(orig_type)
28
-
29
-
30
- class QuickGELU(nn.Module):
31
- def forward(self, x):
32
- return x * torch.sigmoid(1.702 * x)
33
-
34
-
35
- class Local_MHRA(nn.Module):
36
- def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3):
37
- super().__init__()
38
-
39
- padding = pos_kernel_size // 2
40
- re_d_model = int(d_model // dw_reduction)
41
- self.pos_embed = nn.Sequential(
42
- nn.BatchNorm3d(d_model),
43
- nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0),
44
- nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model),
45
- nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0),
46
- )
47
-
48
- # init zero
49
- print('Init zero for Conv in pos_emb')
50
- nn.init.constant_(self.pos_embed[3].weight, 0)
51
- nn.init.constant_(self.pos_embed[3].bias, 0)
52
-
53
- def forward(self, x):
54
- return self.pos_embed(x)
55
-
56
-
57
- class ResidualAttentionBlock(nn.Module):
58
- def __init__(
59
- self, d_model, n_head, attn_mask=None, drop_path=0.0,
60
- dw_reduction=1.5, no_lmhra=False, double_lmhra=True
61
- ):
62
- super().__init__()
63
-
64
- self.n_head = n_head
65
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
66
- print(f'Drop path rate: {drop_path}')
67
-
68
- self.no_lmhra = no_lmhra
69
- self.double_lmhra = double_lmhra
70
- print(f'No L_MHRA: {no_lmhra}')
71
- print(f'Double L_MHRA: {double_lmhra}')
72
- if not no_lmhra:
73
- self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction)
74
- if double_lmhra:
75
- self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction)
76
-
77
- # spatial
78
- self.attn = MultiheadAttention(d_model, n_head)
79
- self.ln_1 = LayerNorm(d_model)
80
- self.mlp = nn.Sequential(OrderedDict([
81
- ("c_fc", nn.Linear(d_model, d_model * 4)),
82
- ("gelu", QuickGELU()),
83
- ("c_proj", nn.Linear(d_model * 4, d_model))
84
- ]))
85
- self.ln_2 = LayerNorm(d_model)
86
- self.attn_mask = attn_mask
87
-
88
- def attention(self, x):
89
- self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
90
- return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
91
-
92
- def forward(self, x, T=8, use_checkpoint=False):
93
- # x: 1+HW, NT, C
94
- if not self.no_lmhra:
95
- # Local MHRA
96
- tmp_x = x[1:, :, :]
97
- L, NT, C = tmp_x.shape
98
- N = NT // T
99
- H = W = int(L ** 0.5)
100
- tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
101
- tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x))
102
- tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
103
- x = torch.cat([x[:1, :, :], tmp_x], dim=0)
104
- # MHSA
105
- if use_checkpoint:
106
- attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
107
- x = x + self.drop_path(attn_out)
108
- else:
109
- x = x + self.drop_path(self.attention(self.ln_1(x)))
110
- # Local MHRA
111
- if not self.no_lmhra and self.double_lmhra:
112
- tmp_x = x[1:, :, :]
113
- tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
114
- tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x))
115
- tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
116
- x = torch.cat([x[:1, :, :], tmp_x], dim=0)
117
- # FFN
118
- if use_checkpoint:
119
- mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
120
- x = x + self.drop_path(mlp_out)
121
- else:
122
- x = x + self.drop_path(self.mlp(self.ln_2(x)))
123
- return x
124
-
125
-
126
- class Extractor(nn.Module):
127
- def __init__(
128
- self, d_model, n_head, attn_mask=None,
129
- mlp_factor=4.0, dropout=0.0, drop_path=0.0,
130
- ):
131
- super().__init__()
132
-
133
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
134
- print(f'Drop path rate: {drop_path}')
135
- self.attn = nn.MultiheadAttention(d_model, n_head)
136
- self.ln_1 = nn.LayerNorm(d_model)
137
- d_mlp = round(mlp_factor * d_model)
138
- self.mlp = nn.Sequential(OrderedDict([
139
- ("c_fc", nn.Linear(d_model, d_mlp)),
140
- ("gelu", QuickGELU()),
141
- ("dropout", nn.Dropout(dropout)),
142
- ("c_proj", nn.Linear(d_mlp, d_model))
143
- ]))
144
- self.ln_2 = nn.LayerNorm(d_model)
145
- self.ln_3 = nn.LayerNorm(d_model)
146
- self.attn_mask = attn_mask
147
-
148
- # zero init
149
- nn.init.xavier_uniform_(self.attn.in_proj_weight)
150
- nn.init.constant_(self.attn.out_proj.weight, 0.)
151
- nn.init.constant_(self.attn.out_proj.bias, 0.)
152
- nn.init.xavier_uniform_(self.mlp[0].weight)
153
- nn.init.constant_(self.mlp[-1].weight, 0.)
154
- nn.init.constant_(self.mlp[-1].bias, 0.)
155
-
156
- def attention(self, x, y):
157
- d_model = self.ln_1.weight.size(0)
158
- q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
159
-
160
- k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
161
- v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
162
- Tx, Ty, N = q.size(0), k.size(0), q.size(1)
163
- q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
164
- k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
165
- v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
166
- aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
167
-
168
- aff = aff.softmax(dim=-1)
169
- out = aff @ v
170
- out = out.permute(2, 0, 1, 3).flatten(2)
171
- out = self.attn.out_proj(out)
172
- return out
173
-
174
- def forward(self, x, y):
175
- x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
176
- x = x + self.drop_path(self.mlp(self.ln_2(x)))
177
- return x
178
-
179
-
180
- class Transformer(nn.Module):
181
- def __init__(
182
- self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
183
- use_checkpoint=False, checkpoint_num=[0], t_size=8, dw_reduction=2,
184
- no_lmhra=False, double_lmhra=True,
185
- return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
186
- n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
187
- mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
188
- cls_dropout=0.5, num_classes=400,
189
- ):
190
- super().__init__()
191
- self.T = t_size
192
- self.return_list = return_list
193
- # backbone
194
- b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
195
- self.resblocks = nn.ModuleList([
196
- ResidualAttentionBlock(
197
- width, heads, attn_mask,
198
- drop_path=b_dpr[i],
199
- dw_reduction=dw_reduction,
200
- no_lmhra=no_lmhra,
201
- double_lmhra=double_lmhra,
202
- ) for i in range(layers)
203
- ])
204
- # checkpoint
205
- self.use_checkpoint = use_checkpoint
206
- self.checkpoint_num = checkpoint_num
207
- self.n_layers = n_layers
208
- print(f'Use checkpoint: {self.use_checkpoint}')
209
- print(f'Checkpoint number: {self.checkpoint_num}')
210
-
211
- # global block
212
- assert n_layers == len(return_list)
213
- if n_layers > 0:
214
- self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
215
- self.dpe = nn.ModuleList([
216
- nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
217
- for i in range(n_layers)
218
- ])
219
- for m in self.dpe:
220
- nn.init.constant_(m.bias, 0.)
221
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
222
- self.dec = nn.ModuleList([
223
- Extractor(
224
- n_dim, n_head, mlp_factor=mlp_factor,
225
- dropout=mlp_dropout[i], drop_path=dpr[i],
226
- ) for i in range(n_layers)
227
- ])
228
- self.balance = nn.Parameter(torch.zeros((n_dim)))
229
- self.sigmoid = nn.Sigmoid()
230
- # projection
231
- self.proj = nn.Sequential(
232
- nn.LayerNorm(n_dim),
233
- nn.Dropout(cls_dropout),
234
- nn.Linear(n_dim, num_classes),
235
- )
236
-
237
- def forward(self, x):
238
- T_down = self.T
239
- L, NT, C = x.shape
240
- N = NT // T_down
241
- H = W = int((L - 1) ** 0.5)
242
-
243
- if self.n_layers > 0:
244
- cls_token = self.temporal_cls_token.repeat(1, N, 1)
245
-
246
- j = -1
247
- for i, resblock in enumerate(self.resblocks):
248
- if self.use_checkpoint and i < self.checkpoint_num[0]:
249
- x = resblock(x, self.T, use_checkpoint=True)
250
- else:
251
- x = resblock(x, T_down)
252
- if i in self.return_list:
253
- j += 1
254
- tmp_x = x.clone()
255
- tmp_x = tmp_x.view(L, N, T_down, C)
256
- # dpe
257
- _, tmp_feats = tmp_x[:1], tmp_x[1:]
258
- tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
259
- tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1).contiguous()
260
- tmp_x[1:] = tmp_x[1:] + tmp_feats
261
- # global block
262
- tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
263
- cls_token = self.dec[j](cls_token, tmp_x)
264
-
265
- if self.n_layers > 0:
266
- weight = self.sigmoid(self.balance)
267
- residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
268
- return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
269
- else:
270
- residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
271
- return self.proj(residual)
272
-
273
-
274
- class VisionTransformer(nn.Module):
275
- def __init__(
276
- self,
277
- # backbone
278
- input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
279
- use_checkpoint=False, checkpoint_num=[0], t_size=8, kernel_size=3, dw_reduction=1.5,
280
- temporal_downsample=True,
281
- no_lmhra=False, double_lmhra=True,
282
- # global block
283
- return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
284
- n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
285
- mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
286
- cls_dropout=0.5, num_classes=400,
287
- ):
288
- super().__init__()
289
- self.input_resolution = input_resolution
290
- self.output_dim = output_dim
291
- padding = (kernel_size - 1) // 2
292
- if temporal_downsample:
293
- self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
294
- t_size = t_size // 2
295
- else:
296
- self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
297
-
298
- scale = width ** -0.5
299
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
300
- self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
301
- self.ln_pre = LayerNorm(width)
302
-
303
- self.transformer = Transformer(
304
- width, layers, heads, dw_reduction=dw_reduction,
305
- backbone_drop_path_rate=backbone_drop_path_rate,
306
- use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
307
- no_lmhra=no_lmhra, double_lmhra=double_lmhra,
308
- return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
309
- mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
310
- cls_dropout=cls_dropout, num_classes=num_classes,
311
- )
312
-
313
- def forward(self, x):
314
- x = self.conv1(x) # shape = [*, width, grid, grid]
315
- N, C, T, H, W = x.shape
316
- x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
317
-
318
- x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
319
- x = x + self.positional_embedding.to(x.dtype)
320
- x = self.ln_pre(x)
321
-
322
- x = x.permute(1, 0, 2) # NLD -> LND
323
- out = self.transformer(x)
324
- return out
325
-
326
-
327
- def inflate_weight(weight_2d, time_dim, center=True):
328
- print(f'Init center: {center}')
329
- if center:
330
- weight_3d = torch.zeros(*weight_2d.shape)
331
- weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
332
- middle_idx = time_dim // 2
333
- weight_3d[:, :, middle_idx, :, :] = weight_2d
334
- else:
335
- weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
336
- weight_3d = weight_3d / time_dim
337
- return weight_3d
338
-
339
-
340
- def load_state_dict(model, state_dict):
341
- state_dict_3d = model.state_dict()
342
- for k in state_dict.keys():
343
- if state_dict[k].shape != state_dict_3d[k].shape:
344
- if len(state_dict_3d[k].shape) <= 2:
345
- print(f'Ignore: {k}')
346
- continue
347
- print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
348
- time_dim = state_dict_3d[k].shape[2]
349
- state_dict[k] = inflate_weight(state_dict[k], time_dim)
350
- model.load_state_dict(state_dict, strict=False)
351
-
352
-
353
- def uniformerv2_b16(
354
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
355
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
356
- temporal_downsample=True,
357
- no_lmhra=False, double_lmhra=True,
358
- return_list=[8, 9, 10, 11],
359
- n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
360
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
361
- cls_dropout=0.5, num_classes=400,
362
- ):
363
- model = VisionTransformer(
364
- input_resolution=224,
365
- patch_size=16,
366
- width=768,
367
- layers=12,
368
- heads=12,
369
- output_dim=512,
370
- use_checkpoint=use_checkpoint,
371
- checkpoint_num=checkpoint_num,
372
- t_size=t_size,
373
- dw_reduction=dw_reduction,
374
- backbone_drop_path_rate=backbone_drop_path_rate,
375
- temporal_downsample=temporal_downsample,
376
- no_lmhra=no_lmhra,
377
- double_lmhra=double_lmhra,
378
- return_list=return_list,
379
- n_layers=n_layers,
380
- n_dim=n_dim,
381
- n_head=n_head,
382
- mlp_factor=mlp_factor,
383
- drop_path_rate=drop_path_rate,
384
- mlp_dropout=mlp_dropout,
385
- cls_dropout=cls_dropout,
386
- num_classes=num_classes,
387
- )
388
-
389
- if pretrained:
390
- print('load pretrained weights')
391
- state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
392
- load_state_dict(model, state_dict)
393
- return model.eval()
394
-
395
-
396
- def uniformerv2_l14(
397
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
398
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
399
- temporal_downsample=True,
400
- no_lmhra=False, double_lmhra=True,
401
- return_list=[20, 21, 22, 23],
402
- n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
403
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
404
- cls_dropout=0.5, num_classes=400,
405
- ):
406
- model = VisionTransformer(
407
- input_resolution=224,
408
- patch_size=14,
409
- width=1024,
410
- layers=24,
411
- heads=16,
412
- output_dim=768,
413
- use_checkpoint=use_checkpoint,
414
- checkpoint_num=checkpoint_num,
415
- t_size=t_size,
416
- dw_reduction=dw_reduction,
417
- backbone_drop_path_rate=backbone_drop_path_rate,
418
- temporal_downsample=temporal_downsample,
419
- no_lmhra=no_lmhra,
420
- double_lmhra=double_lmhra,
421
- return_list=return_list,
422
- n_layers=n_layers,
423
- n_dim=n_dim,
424
- n_head=n_head,
425
- mlp_factor=mlp_factor,
426
- drop_path_rate=drop_path_rate,
427
- mlp_dropout=mlp_dropout,
428
- cls_dropout=cls_dropout,
429
- num_classes=num_classes,
430
- )
431
-
432
- if pretrained:
433
- print('load pretrained weights')
434
- state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
435
- load_state_dict(model, state_dict)
436
- return model.eval()
437
-
438
-
439
- def uniformerv2_l14_336(
440
- pretrained=True, use_checkpoint=False, checkpoint_num=[0],
441
- t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
442
- temporal_downsample=False,
443
- no_lmhra=False, double_lmhra=True,
444
- return_list=[20, 21, 22, 23],
445
- n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
446
- mlp_dropout=[0.5, 0.5, 0.5, 0.5],
447
- cls_dropout=0.5, num_classes=400,
448
- ):
449
- model = VisionTransformer(
450
- input_resolution=336,
451
- patch_size=14,
452
- width=1024,
453
- layers=24,
454
- heads=16,
455
- output_dim=768,
456
- use_checkpoint=use_checkpoint,
457
- checkpoint_num=checkpoint_num,
458
- t_size=t_size,
459
- dw_reduction=dw_reduction,
460
- backbone_drop_path_rate=backbone_drop_path_rate,
461
- temporal_downsample=temporal_downsample,
462
- no_lmhra=no_lmhra,
463
- double_lmhra=double_lmhra,
464
- return_list=return_list,
465
- n_layers=n_layers,
466
- n_dim=n_dim,
467
- n_head=n_head,
468
- mlp_factor=mlp_factor,
469
- drop_path_rate=drop_path_rate,
470
- mlp_dropout=mlp_dropout,
471
- cls_dropout=cls_dropout,
472
- num_classes=num_classes,
473
- )
474
-
475
- if pretrained:
476
- print('load pretrained weights')
477
- state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
478
- load_state_dict(model, state_dict)
479
- return model.eval()
480
-
481
-
482
- if __name__ == '__main__':
483
- import time
484
- from fvcore.nn import FlopCountAnalysis
485
- from fvcore.nn import flop_count_table
486
- import numpy as np
487
-
488
- seed = 4217
489
- np.random.seed(seed)
490
- torch.manual_seed(seed)
491
- torch.cuda.manual_seed(seed)
492
- torch.cuda.manual_seed_all(seed)
493
- num_frames = 16
494
-
495
- model = uniformerv2_l14(
496
- pretrained=False,
497
- t_size=num_frames, backbone_drop_path_rate=0., drop_path_rate=0.,
498
- dw_reduction=1.5,
499
- no_lmhra=False,
500
- temporal_downsample=True,
501
- return_list=[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
502
- mlp_dropout=[0.5]*16,
503
- n_layers=16
504
- )
505
- print(model)
506
-
507
- flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
508
- s = time.time()
509
- print(flop_count_table(flops, max_depth=1))
510
- print(time.time()-s)
spaces/Andy1621/uniformer_image_detection/configs/regnet/README.md DELETED
@@ -1,96 +0,0 @@
1
- # Designing Network Design Spaces
2
-
3
- ## Introduction
4
-
5
- [BACKBONE]
6
-
7
- We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet.
8
-
9
- The pre-trained models are converted from the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).
10
-
11
- ```latex
12
- @article{radosavovic2020designing,
13
- title={Designing Network Design Spaces},
14
- author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár},
15
- year={2020},
16
- eprint={2003.13678},
17
- archivePrefix={arXiv},
18
- primaryClass={cs.CV}
19
- }
20
- ```
21
-
22
- ## Usage
23
-
24
- To use a RegNet model, two steps are required:
25
-
26
- 1. Convert the model to the ResNet style supported by MMDetection
27
- 2. Modify the backbone and neck in the config accordingly
28
-
29
- ### Convert model
30
-
31
- We already provide models ranging from 400MF to 12GF in our model zoo.
32
-
33
- For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained by [pycls](https://github.com/facebookresearch/pycls/) to
34
- ResNet-style checkpoints used in MMDetection.
35
-
36
- ```bash
37
- python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH}
38
- ```
39
-
40
- This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
41
-
42
- ### Modify config
43
-
44
- Users can modify the backbone's `depth` in the config and the corresponding keys in `arch` according to the configs in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).
45
- The FPN parameter `in_channels` can be found in Figures 15 & 16 of the paper (`wi` in the legend).
46
- This directory already provides some configs with their performance, using RegNetX models from the 800MF to the 12GF level.
47
- For other pre-trained or self-implemented RegNet models, users are responsible for checking these parameters themselves (an illustrative sketch is given below).
48
-
49
- **Note**: Although Fig. 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, they are quantized and thus inaccurate; using them sometimes produces a different backbone whose keys do not match those in the pre-trained model.
50
-
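A rough sketch of the two steps above, in the style of the configs in this directory. This is an illustrative, unverified example: the base config path, the `arch` string, and the `in_channels` widths are assumptions that should be checked against the pycls model zoo and the shipped RegNetX-3.2GF configs.

```python
# Hypothetical config override switching a Mask R-CNN base config to a RegNetX-3.2GF backbone.
# All values are illustrative; verify them before use.
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'  # assumed base config
model = dict(
    pretrained='open-mmlab://regnetx_3.2gf',
    backbone=dict(
        _delete_=True,               # drop the ResNet backbone settings from the base
        type='RegNet',
        arch='regnetx_3.2gf',        # resolves w0/wa/wm/group_w/bot_mul internally
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],  # stage widths w_i from Fig. 15 & 16
        out_channels=256,
        num_outs=5))
```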
51
- ## Results
52
-
53
- ### Mask R-CNN
54
-
55
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
56
- | :---------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
57
- | [R-50-FPN](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py)| pytorch | 1x | 4.4 | 12.0 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) |
58
- |[RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py)| pytorch | 1x |5.0 ||40.3|36.6|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) |
59
- |[RegNetX-4.0GF-FPN](./mask_rcnn_regnetx-4GF_fpn_1x_coco.py)| pytorch | 1x |5.5||41.5|37.4|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217.log.json) |
60
- | [R-101-FPN](../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py)| pytorch | 1x | 6.4 | 10.3 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) |
61
- |[RegNetX-6.4GF-FPN](./mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py)| pytorch | 1x |6.1 ||41.0|37.1|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439.log.json) |
62
- | [X-101-32x4d-FPN](../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | pytorch | 1x | 7.6 | 9.4 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) |
63
- |[RegNetX-8.0GF-FPN](./mask_rcnn_regnetx-8GF_fpn_1x_coco.py)| pytorch | 1x |6.4 ||41.7|37.5|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515.log.json) |
64
- |[RegNetX-12GF-FPN](./mask_rcnn_regnetx-12GF_fpn_1x_coco.py)| pytorch | 1x |7.4 ||42.2|38|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552.log.json) |
65
- |[RegNetX-3.2GF-FPN-DCN-C3-C5](./mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py)| pytorch | 1x |5.0 ||40.3|36.6|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726.log.json) |
66
-
67
- ### Faster R-CNN
68
-
69
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
70
- | :---------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
71
- | [R-50-FPN](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py)| pytorch | 1x | 4.0 | 18.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
72
- |[RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py)| pytorch | 1x | 4.5||39.9|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927.log.json) |
73
- |[RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py)| pytorch | 2x | 4.5||41.1|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955.log.json) |
74
-
75
- ### RetinaNet
76
-
77
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
78
- | :---------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
79
- | [R-50-FPN](../retinanet/retinanet_r50_fpn_1x_coco.py) | pytorch | 1x | 3.8 | 16.6 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) |
80
- |[RegNetX-800MF-FPN](./retinanet_regnetx-800MF_fpn_1x_coco.py)| pytorch | 1x |2.5||35.6|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403.log.json) |
81
- |[RegNetX-1.6GF-FPN](./retinanet_regnetx-1.6GF_fpn_1x_coco.py)| pytorch | 1x |3.3||37.3|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403.log.json) |
82
- |[RegNetX-3.2GF-FPN](./retinanet_regnetx-3.2GF_fpn_1x_coco.py)| pytorch | 1x |4.2 ||39.1|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) |
83
-
84
- ### Pre-trained models
85
-
86
- We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks.
87
-
88
- | Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
89
- | :-----: | :-----: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
90
- |Faster RCNN |[RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py)| pytorch | 3x |5.0 ||42.2|-|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200520_224253-bf85ae3e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200520_224253.log.json) |
91
- |Mask RCNN |[RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py)| pytorch | 3x |5.0 ||43.1|38.7|[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) |
92
-
93
- ### Notice
94
-
95
- 1. The models are trained with a different weight decay, i.e. `weight_decay=5e-5`, following the ImageNet training setting. This brings an improvement of at least 0.7 AP absolute but does not improve the model when ResNet-50 is used.
96
- 2. RetinaNets using RegNets are trained with a learning rate of 0.02 and gradient clipping. We find that a learning rate of 0.02 improves the results by at least 0.7 AP absolute, and gradient clipping is necessary to stabilize training. However, this does not improve the performance of the ResNet-50-FPN RetinaNet.
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './encnet_r50-d8_512x512_80k_ade20k.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/README.md DELETED
@@ -1,85 +0,0 @@
1
- # Fully Convolutional Networks for Semantic Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{shelhamer2017fully,
9
- title={Fully convolutional networks for semantic segmentation},
10
- author={Shelhamer, Evan and Long, Jonathan and Darrell, Trevor},
11
- journal={IEEE transactions on pattern analysis and machine intelligence},
12
- volume={39},
13
- number={4},
14
- pages={640--651},
15
- year={2017},
16
- publisher={IEEE Trans Pattern Anal Mach Intell}
17
- }
18
- ```
19
-
20
- ## Results and models
21
-
22
- ### Cityscapes
23
-
24
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
25
- | ------ | ---------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
26
- | FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) |
27
- | FCN | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.66 | 75.45 | 76.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852.log.json) |
28
- | FCN | R-50-D8 | 769x769 | 40000 | 6.5 | 1.80 | 71.47 | 72.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104.log.json) |
29
- | FCN | R-101-D8 | 769x769 | 40000 | 10.4 | 1.19 | 73.93 | 75.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208.log.json) |
30
- | FCN | R-18-D8 | 512x1024 | 80000 | 1.7 | 14.65 | 71.11 | 72.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes_20201225_021327-6c50f8b4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes-20201225_021327.log.json) |
31
- | FCN | R-50-D8 | 512x1024 | 80000 | - | | 73.61 | 74.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019.log.json) |
32
- | FCN | R-101-D8 | 512x1024 | 80000 | - | - | 75.13 | 75.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038.log.json) |
33
- | FCN | R-18-D8 | 769x769 | 80000 | 1.9 | 6.40 | 70.80 | 73.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes_20201225_021451-9739d1b8.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes-20201225_021451.log.json) |
34
- | FCN | R-50-D8 | 769x769 | 80000 | - | - | 72.64 | 73.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749.log.json) |
35
- | FCN | R-101-D8 | 769x769 | 80000 | - | - | 75.52 | 76.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354.log.json) |
36
- | FCN | R-18b-D8 | 512x1024 | 80000 | 1.6 | 16.74 | 70.24 | 72.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes_20201225_230143-92c0f445.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes-20201225_230143.log.json) |
37
- | FCN | R-50b-D8 | 512x1024 | 80000 | 5.6 | 4.20 | 75.65 | 77.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes_20201225_094221-82957416.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes-20201225_094221.log.json) |
38
- | FCN | R-101b-D8 | 512x1024 | 80000 | 9.1 | 2.73 | 77.37 | 78.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes_20201226_160213-4543858f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes-20201226_160213.log.json) |
39
- | FCN | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.70 | 69.66 | 72.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes_20201226_004430-32d504e5.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes-20201226_004430.log.json) |
40
- | FCN | R-50b-D8 | 769x769 | 80000 | 6.3 | 1.82 | 73.83 | 76.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes_20201225_094223-94552d38.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes-20201225_094223.log.json) |
41
- | FCN | R-101b-D8 | 769x769 | 80000 | 10.3 | 1.15 | 77.02 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes_20201226_170012-82be37e2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes-20201226_170012.log.json) |
42
- | FCN-D6 | R-50-D16 | 512x1024 | 40000 | 3.4 | 10.22 | 77.06 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes-98d5d1bc.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes-20210305_130133.log.json) |
43
- | FCN-D6 | R-50-D16 | 512x1024 | 80000 | - | 10.35 | 77.27 | 78.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes-98d5d1bc.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes-20210306_115604.log.json) |
44
- | FCN-D6 | R-50-D16 | 769x769 | 40000 | 3.7 | 4.17 | 76.82 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes-1aab18ed.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes-20210305_185744.log.json) |
45
- | FCN-D6 | R-50-D16 | 769x769 | 80000 | - | 4.15 | 77.04 | 78.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes-109d88eb.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes-20210305_200413.log.json) |
46
- | FCN-D6 | R-101-D16 | 512x1024 | 40000 | 4.5 | 8.04 | 77.36 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes-9cf2b450.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes-20210305_130337.log.json) |
47
- | FCN-D6 | R-101-D16 | 512x1024 | 80000 | - | 8.26 | 78.46 | 80.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes-cb336445.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes-20210308_102747.log.json) |
48
- | FCN-D6 | R-101-D16 | 769x769 | 40000 | 5.0 | 3.12 | 77.28 | 78.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes-60b114e9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes-20210308_102453.log.json) |
49
- | FCN-D6 | R-101-D16 | 769x769 | 80000 | - | 3.21 | 78.06 | 79.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes-e33adc4f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes-20210306_120016.log.json) |
50
- | FCN-D6 | R-50b-D16 | 512x1024 | 80000 | 3.2 | 10.16 | 76.99 | 79.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50b_d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_512x1024_80k_cityscapes/fcn_d6_r50b_d16_512x1024_80k_cityscapes-6a0b62e9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_512x1024_80k_cityscapes/fcn_d6_r50b_d16_512x1024_80k_cityscapes-20210311_125550.log.json) |
51
- | FCN-D6 | R-50b-D16 | 769x769 | 80000 | 3.6 | 4.17 | 76.86 | 78.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50b_d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_769x769_80k_cityscapes/fcn_d6_r50b_d16_769x769_80k_cityscapes-d665f231.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_769x769_80k_cityscapes/fcn_d6_r50b_d16_769x769_80k_cityscapes-20210311_131012.log.json) |
52
- | FCN-D6 | R-101b-D16 | 512x1024 | 80000 | 4.3 | 8.46 | 77.72 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101b_d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_512x1024_80k_cityscapes/fcn_d6_r101b_d16_512x1024_80k_cityscapes-3f2eb5b4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_512x1024_80k_cityscapes/fcn_d6_r101b_d16_512x1024_80k_cityscapes-20210311_144305.log.json) |
53
- | FCN-D6 | R-101b-D16 | 769x769 | 80000 | 4.8 | 3.32 | 77.34 | 78.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101b_d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_769x769_80k_cityscapes/fcn_d6_r101b_d16_769x769_80k_cityscapes-c4d8bfbc.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_769x769_80k_cityscapes/fcn_d6_r101b_d16_769x769_80k_cityscapes-20210311_154527.log.json) |
54
-
55
- ### ADE20K
56
-
57
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
58
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
59
- | FCN | R-50-D8 | 512x512 | 80000 | 8.5 | 23.49 | 35.94 | 37.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016.log.json) |
60
- | FCN | R-101-D8 | 512x512 | 80000 | 12 | 14.78 | 39.61 | 40.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143.log.json) |
61
- | FCN | R-50-D8 | 512x512 | 160000 | - | - | 36.10 | 38.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713.log.json) |
62
- | FCN | R-101-D8 | 512x512 | 160000 | - | - | 39.91 | 41.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) |
63
-
64
- ### Pascal VOC 2012 + Aug
65
-
66
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
67
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
68
- | FCN | R-50-D8 | 512x512 | 20000 | 5.7 | 23.28 | 67.08 | 69.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715.log.json) |
69
- | FCN | R-101-D8 | 512x512 | 20000 | 9.2 | 14.81 | 71.16 | 73.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842.log.json) |
70
- | FCN | R-50-D8 | 512x512 | 40000 | - | - | 66.97 | 69.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) |
71
- | FCN | R-101-D8 | 512x512 | 40000 | - | - | 69.91 | 72.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240.log.json) |
72
-
73
- ### Pascal Context
74
-
75
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
76
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
77
- | FCN | R-101-D8 | 480x480 | 40000 | - | 9.93 | 44.43 | 45.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context-20210421_154757-b5e97937.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context-20210421_154757.log.json) |
78
- | FCN | R-101-D8 | 480x480 | 80000 | - | - | 44.13 | 45.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context-20210421_163310-4711813f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context-20210421_163310.log.json) |
79
-
80
- ### Pascal Context 59
81
-
82
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
83
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
84
- | FCN | R-101-D8 | 480x480 | 40000 | - | - | 48.42 | 50.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59_20210415_230724-8cf83682.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59-20210415_230724.log.json) |
85
- | FCN | R-101-D8 | 480x480 | 80000 | - | - | 49.35 | 51.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59-20210416_110804.log.json) |
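For reference, a checkpoint from the table above can be loaded for single-image inference through mmsegmentation's Python API. This is a minimal sketch assuming the mmsegmentation v0.x API (`init_segmentor` / `inference_segmentor`) and a locally cloned copy of the config directory; `demo.png` is a placeholder image path.

```python
# Minimal sketch: inference with the FCN R-101-D8 Pascal Context 59 (80k) checkpoint
# listed above. Assumes mmsegmentation v0.x is installed and configs/ is available
# locally; "demo.png" is a placeholder input image.
from mmseg.apis import init_segmentor, inference_segmentor

config = 'configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py'
checkpoint = ('https://download.openmmlab.com/mmsegmentation/v0.5/fcn/'
              'fcn_r101-d8_480x480_80k_pascal_context_59/'
              'fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth')

model = init_segmentor(config, checkpoint, device='cuda:0')  # build the model and load weights
result = inference_segmentor(model, 'demo.png')              # list with one (H, W) label map
print(result[0].shape)
```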
 
 
 
spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-tests.sh DELETED
@@ -1,2 +0,0 @@
1
- #!/usr/bin/env sh
2
- poetry run pytest
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/benchmarks.md DELETED
@@ -1,196 +0,0 @@
1
-
2
- # Benchmarks
3
-
4
- Here we benchmark the training speed of a Mask R-CNN in detectron2,
5
- alongside several other popular open source Mask R-CNN implementations.
6
-
7
-
8
- ### Settings
9
-
10
- * Hardware: 8 NVIDIA V100s with NVLink.
11
- * Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5,
12
-   TensorFlow 1.15.0rc2, Keras 2.2.5, MXNet 1.6.0b20190820.
13
- * Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameters as the
14
- [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml)
15
- (it does not have scale augmentation).
16
- * Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time.
17
- Note that for R-CNN-style models, the throughput of a model typically changes during training, because
18
- it depends on the predictions of the model. Therefore this metric is not directly comparable with
19
- "train speed" in model zoo, which is the average speed of the entire training run.
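The throughput metric described above can be reproduced from per-iteration timings. The sketch below is illustrative only: the timing list is synthetic and the per-iteration batch size is an assumption, not part of the original benchmark code.

```python
# Illustrative sketch of the metric above: average throughput over iterations 100-500,
# skipping GPU warmup. The timing list is synthetic; in practice it would come from the
# training loop's per-iteration wall-clock measurements.
iter_times = [0.30] * 100 + [0.26] * 400   # seconds per iteration (synthetic warmup + steady state)
images_per_iter = 16                        # assumed: 2 images per GPU x 8 GPUs

window = iter_times[100:500]                # iterations 100-500 only
throughput = images_per_iter * len(window) / sum(window)
print(f"average throughput: {throughput:.1f} img/s")   # ~61.5 img/s on this synthetic data
```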
20
-
21
-
22
- ### Main Results
23
-
24
- ```eval_rst
25
- +-------------------------------+--------------------+
26
- | Implementation | Throughput (img/s) |
27
- +===============================+====================+
28
- | |D2| |PT| | 62 |
29
- +-------------------------------+--------------------+
30
- | mmdetection_ |PT| | 53 |
31
- +-------------------------------+--------------------+
32
- | maskrcnn-benchmark_ |PT| | 53 |
33
- +-------------------------------+--------------------+
34
- | tensorpack_ |TF| | 50 |
35
- +-------------------------------+--------------------+
36
- | simpledet_ |mxnet| | 39 |
37
- +-------------------------------+--------------------+
38
- | Detectron_ |C2| | 19 |
39
- +-------------------------------+--------------------+
40
- | `matterport/Mask_RCNN`__ |TF| | 14 |
41
- +-------------------------------+--------------------+
42
-
43
- .. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/
44
- .. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN
45
- .. _mmdetection: https://github.com/open-mmlab/mmdetection/
46
- .. _simpledet: https://github.com/TuSimple/simpledet/
47
- .. _Detectron: https://github.com/facebookresearch/Detectron
48
- __ https://github.com/matterport/Mask_RCNN/
49
-
50
- .. |D2| image:: https://github.com/facebookresearch/detectron2/raw/main/.github/Detectron2-Logo-Horz.svg?sanitize=true
51
- :height: 15pt
52
- :target: https://github.com/facebookresearch/detectron2/
53
- .. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg
54
- :width: 15pt
55
- :height: 15pt
56
- :target: https://pytorch.org
57
- .. |TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png
58
- :width: 15pt
59
- :height: 15pt
60
- :target: https://tensorflow.org
61
- .. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png
62
- :width: 15pt
63
- :height: 15pt
64
- :target: https://mxnet.apache.org/
65
- .. |C2| image:: https://caffe2.ai/static/logo.svg
66
- :width: 15pt
67
- :height: 15pt
68
- :target: https://caffe2.ai
69
- ```
70
-
71
-
72
- Details for each implementation:
73
-
74
- * __Detectron2__: with release v0.1.2, run:
75
- ```
76
- python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8
77
- ```
78
-
79
- * __mmdetection__: at commit `b0d845f`, run
80
- ```
81
- ./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8
82
- ```
83
-
84
- * __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i 's/torch.uint8/torch.bool/g' **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu`
85
- to make it compatible with PyTorch 1.5. Then, run training with
86
- ```
87
- python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml
88
- ```
89
- The speed we observed is faster than its model zoo, likely due to different software versions.
90
-
91
- * __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run
92
- ```
93
- mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz
94
- ```
95
-
96
- * __SimpleDet__: at commit `9187a1`, run
97
- ```
98
- python detection_train.py --config config/mask_r50v1_fpn_1x.py
99
- ```
100
-
101
- * __Detectron__: run
102
- ```
103
- python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml
104
- ```
105
-   Note that many of its ops run on CPUs; therefore, its performance is limited.
106
-
107
- * __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run
108
- ```
109
- python coco.py train --dataset=/data/coco/ --model=imagenet
110
- ```
111
- Note that many small details in this implementation might be different
112
- from Detectron's standards.
113
-
114
- <details>
115
- <summary>
116
- (diff to make it use the same hyperparameters - click to expand)
117
- </summary>
118
-
119
- ```diff
120
- diff --git i/mrcnn/model.py w/mrcnn/model.py
121
- index 62cb2b0..61d7779 100644
122
- --- i/mrcnn/model.py
123
- +++ w/mrcnn/model.py
124
- @@ -2367,8 +2367,8 @@ class MaskRCNN():
125
- epochs=epochs,
126
- steps_per_epoch=self.config.STEPS_PER_EPOCH,
127
- callbacks=callbacks,
128
- - validation_data=val_generator,
129
- - validation_steps=self.config.VALIDATION_STEPS,
130
- + #validation_data=val_generator,
131
- + #validation_steps=self.config.VALIDATION_STEPS,
132
- max_queue_size=100,
133
- workers=workers,
134
- use_multiprocessing=True,
135
- diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py
136
- index d2bf53b..060172a 100644
137
- --- i/mrcnn/parallel_model.py
138
- +++ w/mrcnn/parallel_model.py
139
- @@ -32,6 +32,7 @@ class ParallelModel(KM.Model):
140
- keras_model: The Keras model to parallelize
141
- gpu_count: Number of GPUs. Must be > 1
142
- """
143
- + super().__init__()
144
- self.inner_model = keras_model
145
- self.gpu_count = gpu_count
146
- merged_outputs = self.make_parallel()
147
- diff --git i/samples/coco/coco.py w/samples/coco/coco.py
148
- index 5d172b5..239ed75 100644
149
- --- i/samples/coco/coco.py
150
- +++ w/samples/coco/coco.py
151
- @@ -81,7 +81,10 @@ class CocoConfig(Config):
152
- IMAGES_PER_GPU = 2
153
-
154
- # Uncomment to train on 8 GPUs (default is 1)
155
- - # GPU_COUNT = 8
156
- + GPU_COUNT = 8
157
- + BACKBONE = "resnet50"
158
- + STEPS_PER_EPOCH = 50
159
- + TRAIN_ROIS_PER_IMAGE = 512
160
-
161
- # Number of classes (including background)
162
- NUM_CLASSES = 1 + 80 # COCO has 80 classes
163
- @@ -496,29 +499,10 @@ if __name__ == '__main__':
164
- # *** This training schedule is an example. Update to your needs ***
165
-
166
- # Training - Stage 1
167
- - print("Training network heads")
168
- model.train(dataset_train, dataset_val,
169
- learning_rate=config.LEARNING_RATE,
170
- epochs=40,
171
- - layers='heads',
172
- - augmentation=augmentation)
173
- -
174
- - # Training - Stage 2
175
- - # Finetune layers from ResNet stage 4 and up
176
- - print("Fine tune Resnet stage 4 and up")
177
- - model.train(dataset_train, dataset_val,
178
- - learning_rate=config.LEARNING_RATE,
179
- - epochs=120,
180
- - layers='4+',
181
- - augmentation=augmentation)
182
- -
183
- - # Training - Stage 3
184
- - # Fine tune all layers
185
- - print("Fine tune all layers")
186
- - model.train(dataset_train, dataset_val,
187
- - learning_rate=config.LEARNING_RATE / 10,
188
- - epochs=160,
189
- - layers='all',
190
- + layers='3+',
191
- augmentation=augmentation)
192
-
193
- elif args.command == "evaluate":
194
- ```
195
-
196
- </details>
 
 
 
spaces/Ayanoaisho/L/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: L
3
- emoji: ⚡
4
- colorFrom: green
5
- colorTo: yellow
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/tone_sandhi.py DELETED
@@ -1,351 +0,0 @@
1
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from typing import List
15
- from typing import Tuple
16
-
17
- import jieba
18
- from pypinyin import lazy_pinyin
19
- from pypinyin import Style
20
-
21
-
22
- class ToneSandhi():
23
- def __init__(self):
24
- self.must_neural_tone_words = {
25
- '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝',
26
- '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊',
27
- '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去',
28
- '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号',
29
- '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当',
30
- '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻',
31
- '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂',
32
- '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆',
33
- '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂',
34
- '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿',
35
- '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台',
36
- '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算',
37
- '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨',
38
- '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快',
39
- '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜',
40
- '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔',
41
- '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事',
42
- '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾',
43
- '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼',
44
- '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实',
45
- '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头',
46
- '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼',
47
- '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数',
48
- '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气',
49
- '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈',
50
- '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方',
51
- '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴',
52
- '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦',
53
- '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝',
54
- '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹',
55
- '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息',
56
- '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤',
57
- '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家',
58
- '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故',
59
- '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨',
60
- '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅',
61
- '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱',
62
- '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱',
63
- '扫把', '惦记'
64
- }
65
- self.must_not_neural_tone_words = {
66
- "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎"
67
- }
68
- self.punc = ":,;。?!“”‘’':,;.?!"
69
-
70
- # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
71
- # e.g.
72
- # word: "家里"
73
- # pos: "s"
74
- # finals: ['ia1', 'i3']
75
- def _neural_sandhi(self, word: str, pos: str,
76
- finals: List[str]) -> List[str]:
77
-
78
- # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
79
- for j, item in enumerate(word):
80
- if j - 1 >= 0 and item == word[j - 1] and pos[0] in {
81
- "n", "v", "a"
82
- } and word not in self.must_not_neural_tone_words:
83
- finals[j] = finals[j][:-1] + "5"
84
- ge_idx = word.find("个")
85
- if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
86
- finals[-1] = finals[-1][:-1] + "5"
87
- elif len(word) >= 1 and word[-1] in "的地得":
88
- finals[-1] = finals[-1][:-1] + "5"
89
- # e.g. 走了, 看着, 去过
90
- # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
91
- # finals[-1] = finals[-1][:-1] + "5"
92
- elif len(word) > 1 and word[-1] in "们子" and pos in {
93
- "r", "n"
94
- } and word not in self.must_not_neural_tone_words:
95
- finals[-1] = finals[-1][:-1] + "5"
96
- # e.g. 桌上, 地下, 家里
97
- elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
98
- finals[-1] = finals[-1][:-1] + "5"
99
- # e.g. 上来, 下去
100
- elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
101
- finals[-1] = finals[-1][:-1] + "5"
102
-         # "个" used as a measure word
103
- elif (ge_idx >= 1 and
104
- (word[ge_idx - 1].isnumeric() or
105
- word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个':
106
- finals[ge_idx] = finals[ge_idx][:-1] + "5"
107
- else:
108
- if word in self.must_neural_tone_words or word[
109
- -2:] in self.must_neural_tone_words:
110
- finals[-1] = finals[-1][:-1] + "5"
111
-
112
- word_list = self._split_word(word)
113
- finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]]
114
- for i, word in enumerate(word_list):
115
- # conventional neural in Chinese
116
- if word in self.must_neural_tone_words or word[
117
- -2:] in self.must_neural_tone_words:
118
- finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
119
- finals = sum(finals_list, [])
120
- return finals
121
-
122
- def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
123
- # e.g. 看不懂
124
- if len(word) == 3 and word[1] == "不":
125
- finals[1] = finals[1][:-1] + "5"
126
- else:
127
- for i, char in enumerate(word):
128
- # "不" before tone4 should be bu2, e.g. 不怕
129
- if char == "不" and i + 1 < len(word) and finals[i +
130
- 1][-1] == "4":
131
- finals[i] = finals[i][:-1] + "2"
132
- return finals
133
-
134
- def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
135
- # "一" in number sequences, e.g. 一零零, 二一零
136
- if word.find("一") != -1 and all(
137
- [item.isnumeric() for item in word if item != "一"]):
138
- return finals
139
-         # "一" between reduplication words should be yi5, e.g. 看一看
140
- elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
141
- finals[1] = finals[1][:-1] + "5"
142
- # when "一" is ordinal word, it should be yi1
143
- elif word.startswith("第一"):
144
- finals[1] = finals[1][:-1] + "1"
145
- else:
146
- for i, char in enumerate(word):
147
- if char == "一" and i + 1 < len(word):
148
- # "一" before tone4 should be yi2, e.g. 一段
149
- if finals[i + 1][-1] == "4":
150
- finals[i] = finals[i][:-1] + "2"
151
- # "一" before non-tone4 should be yi4, e.g. 一天
152
- else:
153
-                         # if "一" is followed by punctuation, it is still read with tone 1
154
- if word[i + 1] not in self.punc:
155
- finals[i] = finals[i][:-1] + "4"
156
- return finals
157
-
158
- def _split_word(self, word: str) -> List[str]:
159
- word_list = jieba.cut_for_search(word)
160
- word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
161
- first_subword = word_list[0]
162
- first_begin_idx = word.find(first_subword)
163
- if first_begin_idx == 0:
164
- second_subword = word[len(first_subword):]
165
- new_word_list = [first_subword, second_subword]
166
- else:
167
- second_subword = word[:-len(first_subword)]
168
- new_word_list = [second_subword, first_subword]
169
- return new_word_list
170
-
171
- def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
172
- if len(word) == 2 and self._all_tone_three(finals):
173
- finals[0] = finals[0][:-1] + "2"
174
- elif len(word) == 3:
175
- word_list = self._split_word(word)
176
- if self._all_tone_three(finals):
177
- # disyllabic + monosyllabic, e.g. 蒙古/包
178
- if len(word_list[0]) == 2:
179
- finals[0] = finals[0][:-1] + "2"
180
- finals[1] = finals[1][:-1] + "2"
181
- # monosyllabic + disyllabic, e.g. 纸/老虎
182
- elif len(word_list[0]) == 1:
183
- finals[1] = finals[1][:-1] + "2"
184
- else:
185
- finals_list = [
186
- finals[:len(word_list[0])], finals[len(word_list[0]):]
187
- ]
188
- if len(finals_list) == 2:
189
- for i, sub in enumerate(finals_list):
190
- # e.g. 所有/人
191
- if self._all_tone_three(sub) and len(sub) == 2:
192
- finals_list[i][0] = finals_list[i][0][:-1] + "2"
193
- # e.g. 好/喜欢
194
- elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \
195
- finals_list[0][-1][-1] == "3":
196
-
197
- finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
198
- finals = sum(finals_list, [])
199
-         # split an idiom into two words whose length is 2
200
- elif len(word) == 4:
201
- finals_list = [finals[:2], finals[2:]]
202
- finals = []
203
- for sub in finals_list:
204
- if self._all_tone_three(sub):
205
- sub[0] = sub[0][:-1] + "2"
206
- finals += sub
207
-
208
- return finals
209
-
210
- def _all_tone_three(self, finals: List[str]) -> bool:
211
- return all(x[-1] == "3" for x in finals)
212
-
213
- # merge "不" and the word behind it
214
-     # if not merged, "不" sometimes appears alone in jieba's output, which may cause sandhi errors
215
- def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
216
- new_seg = []
217
- last_word = ""
218
- for word, pos in seg:
219
- if last_word == "不":
220
- word = last_word + word
221
- if word != "不":
222
- new_seg.append((word, pos))
223
- last_word = word[:]
224
- if last_word == "不":
225
- new_seg.append((last_word, 'd'))
226
- last_word = ""
227
- return new_seg
228
-
229
-     # function 1: merge "一" and the reduplication words on its left and right, e.g. "听","一","听" -> "听一听"
230
- # function 2: merge single "一" and the word behind it
231
-     # if not merged, "一" sometimes appears alone in jieba's output, which may cause sandhi errors
232
- # e.g.
233
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
234
- # output seg: [['听一听', 'v']]
235
- def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
236
- new_seg = []
237
- # function 1
238
- for i, (word, pos) in enumerate(seg):
239
- if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][
240
- 0] == seg[i + 1][0] and seg[i - 1][1] == "v":
241
- new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
242
- else:
243
- if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][
244
- 0] == word and pos == "v":
245
- continue
246
- else:
247
- new_seg.append([word, pos])
248
- seg = new_seg
249
- new_seg = []
250
- # function 2
251
- for i, (word, pos) in enumerate(seg):
252
- if new_seg and new_seg[-1][0] == "一":
253
- new_seg[-1][0] = new_seg[-1][0] + word
254
- else:
255
- new_seg.append([word, pos])
256
- return new_seg
257
-
258
- # the first and the second words are all_tone_three
259
- def _merge_continuous_three_tones(
260
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
261
- new_seg = []
262
- sub_finals_list = [
263
- lazy_pinyin(
264
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
265
- for (word, pos) in seg
266
- ]
267
- assert len(sub_finals_list) == len(seg)
268
- merge_last = [False] * len(seg)
269
- for i, (word, pos) in enumerate(seg):
270
- if i - 1 >= 0 and self._all_tone_three(
271
- sub_finals_list[i - 1]) and self._all_tone_three(
272
- sub_finals_list[i]) and not merge_last[i - 1]:
273
-                 # if the last word is a reduplication, do not merge, because reduplications need to go through _neural_sandhi
274
- if not self._is_reduplication(seg[i - 1][0]) and len(
275
- seg[i - 1][0]) + len(seg[i][0]) <= 3:
276
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
277
- merge_last[i] = True
278
- else:
279
- new_seg.append([word, pos])
280
- else:
281
- new_seg.append([word, pos])
282
-
283
- return new_seg
284
-
285
- def _is_reduplication(self, word: str) -> bool:
286
- return len(word) == 2 and word[0] == word[1]
287
-
288
-     # the last char of the first word and the first char of the second word are tone three
289
- def _merge_continuous_three_tones_2(
290
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
291
- new_seg = []
292
- sub_finals_list = [
293
- lazy_pinyin(
294
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
295
- for (word, pos) in seg
296
- ]
297
- assert len(sub_finals_list) == len(seg)
298
- merge_last = [False] * len(seg)
299
- for i, (word, pos) in enumerate(seg):
300
- if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \
301
- merge_last[i - 1]:
302
-                 # if the last word is a reduplication, do not merge, because reduplications need to go through _neural_sandhi
303
- if not self._is_reduplication(seg[i - 1][0]) and len(
304
- seg[i - 1][0]) + len(seg[i][0]) <= 3:
305
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
306
- merge_last[i] = True
307
- else:
308
- new_seg.append([word, pos])
309
- else:
310
- new_seg.append([word, pos])
311
- return new_seg
312
-
313
- def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
314
- new_seg = []
315
- for i, (word, pos) in enumerate(seg):
316
- if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#":
317
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
318
- else:
319
- new_seg.append([word, pos])
320
- return new_seg
321
-
322
- def _merge_reduplication(
323
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
324
- new_seg = []
325
- for i, (word, pos) in enumerate(seg):
326
- if new_seg and word == new_seg[-1][0]:
327
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
328
- else:
329
- new_seg.append([word, pos])
330
- return new_seg
331
-
332
- def pre_merge_for_modify(
333
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
334
- seg = self._merge_bu(seg)
335
- try:
336
- seg = self._merge_yi(seg)
337
- except:
338
- print("_merge_yi failed")
339
- seg = self._merge_reduplication(seg)
340
- seg = self._merge_continuous_three_tones(seg)
341
- seg = self._merge_continuous_three_tones_2(seg)
342
- seg = self._merge_er(seg)
343
- return seg
344
-
345
- def modified_tone(self, word: str, pos: str,
346
- finals: List[str]) -> List[str]:
347
- finals = self._bu_sandhi(word, finals)
348
- finals = self._yi_sandhi(word, finals)
349
- finals = self._neural_sandhi(word, pos, finals)
350
- finals = self._three_sandhi(word, finals)
351
- return finals
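A minimal usage sketch for the ToneSandhi class above, assuming jieba and pypinyin are installed. The sample sentence is arbitrary, and the finals use the same FINALS_TONE3 style the class works with internally.

```python
# Minimal usage sketch for ToneSandhi (assumes jieba and pypinyin are installed;
# the sample sentence is arbitrary).
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style

sandhi = ToneSandhi()
text = "我们一起去公园看一看老虎"
seg = [(p.word, p.flag) for p in psg.cut(text)]
seg = sandhi.pre_merge_for_modify(seg)   # merge 不 / 一 / reduplications before applying sandhi

for word, pos in seg:
    finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
    finals = sandhi.modified_tone(word, pos, finals)
    print(word, finals)
```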
 
 
 
spaces/Benson/text-generation/Examples/Carx Deriva Carreras Mod Apk Vieja Versin.md DELETED
@@ -1,88 +0,0 @@
1
- <br />
2
- <h1>CarX Drift Racing Mod APK versión antigua: Una revisión</h1>
3
- <p>Si usted es un fan de los juegos de carreras de coches, es posible que haya oído hablar de CarX Drift Racing. Es uno de los juegos de deriva más populares y realistas en Android. Puede elegir entre una variedad de coches, personalizarlos y competir en diferentes pistas. También puede competir con otros jugadores en línea y mostrar sus habilidades a la deriva. </p>
4
- <h2>carx deriva carreras mod apk vieja versión</h2><br /><p><b><b>Download Zip</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://bltlly.com/2v6Kna">https://bltlly.com/2v6Kna</a></b></p><br /><br />
5
- <p>Pero ¿y si quieres disfrutar del juego sin limitaciones? ¿Qué pasa si quieres tener dinero y oro ilimitados, desbloquear todos los coches y pistas, y experimentar los mejores gráficos y la física? Bueno, usted puede hacer eso mediante la descarga de la versión apk mod de CarX Drift Racing. Y no solo cualquier apk mod, pero la versión anterior de la misma. </p>
6
- <p>¿Por qué la versión anterior, se pregunta? Bueno, hay algunas razones por las que podría preferir la versión anterior a la última. En este artículo, vamos a revisar el CarX Drift Racing mod apk versión antigua, y le dirá cómo descargar e instalar en su dispositivo. También vamos a enumerar algunos pros y contras de usar este apk mod, y responder a algunas preguntas frecuentes. </p>
7
- <h2>¿Qué es CarX Drift Racing? </h2>
8
- <p>CarX Drift Racing es un juego de carreras desarrollado por CarX Technologies. Fue lanzado en 2014 para dispositivos Android e iOS. El juego se centra en la deriva, que es una técnica de conducción donde el conductor sobreviraje intencionalmente el coche para que se deslice hacia los lados. El juego cuenta con física realista, gráficos impresionantes y controles suaves. Puedes sentir la emoción de la deriva en varios modos, como el modo carrera, el modo en línea, el modo de ataque en el tiempo y el modo de entrenamiento. </p>
9
- <p></p>
10
-
11
- <h2>Características de CarX Drift Racing</h2>
12
- <p>Algunas de las características principales de CarX Drift Racing son:</p>
13
- <ul>
14
- <li>Física realista que simula el comportamiento de los coches reales en diferentes superficies</li>
15
- <li>Impresionantes gráficos que crean una atmósfera inmersiva</li>
16
- <li>Controles suaves que te permiten desplazarte fácilmente</li>
17
- <li>Una variedad de coches para elegir, cada uno con diferentes características y sonidos</li>
18
- <li>Muchas opciones de personalización para tus coches</li>
19
- <li>Una variedad de pistas para competir, cada una con diferentes desafíos y entornos</li>
20
- <li>Un modo de carrera donde se puede progresar a través de los niveles y ganar dinero y oro</li>
21
- <li>Un modo online donde puedes competir con otros jugadores alrededor del mundo</li>
22
- <li>Un modo de ataque de tiempo donde puedes establecer tus mejores tiempos de vuelta</li>
23
- <li>Un modo de entrenamiento donde puedes practicar tus habilidades de deriva</li>
24
- <li>Una tabla de clasificación donde puedes ver tu ranking entre otros jugadores</li>
25
- <li>Un modo fantasma donde puedes competir contra tus propios fantasmas o los de otros jugadores</li>
26
- <li>Un modo de repetición donde puedes ver las carreras de tus otros jugadores</li>
27
- <li>Un modo de ajuste donde se puede ajustar la configuración de su coche para adaptarse a sus preferencias</li>
28
- <li>Un garaje donde puedes guardar tus coches y ver sus estadísticas</li>
29
- </ul>
30
- <h2>¿Por qué descargar la versión antigua de CarX Drift Racing mod apk? </h2>
31
- <p>CarX Drift Racing es un gran juego, pero también tiene algunas limitaciones. Por ejemplo, necesitas gastar mucho dinero y oro para desbloquear y mejorar tus coches y pistas. También necesitas ver anuncios para obtener recompensas o bonificaciones. Y es posible que tengas problemas de compatibilidad o errores con la última versión del juego. </p>
32
- <p>Es por eso que algunos jugadores prefieren descargar la versión anterior de CarX Drift Racing mod apk. Un apk mod es una versión modificada del juego original que tiene algunas características o cambios que no están disponibles en la versión oficial. Por ejemplo, la versión anterior de CarX Drift Racing mod apk ha:</p>
33
- <ul>
34
-
35
- <li>Coches y pistas desbloqueados, para que pueda acceder a todo el contenido sin restricciones</li>
36
- <li>Física realista y gráficos, para que pueda disfrutar de la mejor experiencia de juego</li>
37
- </ul>
38
- <p>Al descargar la versión anterior de CarX Drift Racing mod apk, usted puede tener más diversión y libertad en el juego. También puede evitar algunos de los problemas que podrían ocurrir con la última versión del juego. </p>
39
- <h2>¿Cómo descargar e instalar CarX Drift Racing mod apk versión antigua? </h2>
40
- <p>Si desea descargar e instalar la versión antigua de CarX Drift Racing mod apk en su dispositivo, debe seguir estos pasos:</p>
41
- <h3>Paso 1: Descargar el archivo apk</h3>
42
- <p>El primer paso es descargar el archivo apk de CarX Drift Racing mod apk versión antigua. Puede encontrarlo en varios sitios web que ofrecen juegos y aplicaciones modificadas. Sin embargo, debe tener cuidado y elegir una fuente confiable y segura. Algunos sitios web pueden tener archivos falsos o maliciosos que pueden dañar su dispositivo o robar sus datos. </p>
43
- <p>Uno de los sitios web que recomendamos es [APKPure]. Es un sitio web confiable y popular que proporciona archivos apk libres y puros para los usuarios de Android. Puede descargar CarX Drift Racing mod apk versión antigua de este [enlace]. El tamaño del archivo es de unos 300 MB, así que asegúrate de tener suficiente espacio en tu dispositivo. </p>
44
- <h3>Paso 2: Habilitar fuentes desconocidas</h3>
45
- <p>El siguiente paso es habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de Google Play Store. Para habilitar fuentes desconocidas, debe ir a la configuración del dispositivo, luego a la seguridad y luego a fuentes desconocidas. Pulse en el interruptor o en la casilla de verificación para activarlo. </p>
46
- <h3>Paso 3: Instalar el archivo apk</h3>
47
- <p>El tercer paso es instalar el archivo apk de CarX Drift Racing mod apk versión antigua. Para ello, debe localizar el archivo en el administrador de archivos o la carpeta de descargas de su dispositivo. Toque en el archivo y siga las instrucciones en la pantalla. La instalación puede tardar unos minutos. </p>
48
-
49
- <p>El paso final es lanzar el juego y disfrutarlo. Puede encontrar el icono del juego en la pantalla de inicio del dispositivo o en el cajón de la aplicación. Tócalo y empieza a jugar. Verás que tienes dinero y oro ilimitados, coches y pistas desbloqueados, y física y gráficos realistas. </p>
50
- <h2>Pros y contras de CarX Drift Racing mod apk versión antigua</h2>
51
- <p>Como con cualquier juego modificado, hay algunos pros y contras de usar CarX Drift Racing mod apk versión antigua. Aquí están algunos de ellos:</p>
52
- <h3>Pros</h3>
53
- <ul>
54
- <li><h4>Dinero y oro ilimitados</h4>
55
- <p>Esta es una de las principales ventajas de usar CarX Drift Racing mod apk versión antigua. Tendrás dinero y oro ilimitados en el juego, lo que significa que puedes comprar y actualizar lo que quieras. También puedes saltarte anuncios y obtener recompensas o bonos gratis. </p></li>
56
- <li><h4>Coches y pistas desbloqueados</h4>
57
- <p>Este es otro beneficio de usar CarX Drift Racing mod apk versión antigua. Usted tendrá acceso a todos los coches y pistas en el juego, sin restricciones. Puedes elegir entre más de 40 coches y más de 30 pistas, cada una con diferentes características y desafíos. </p></li>
58
- <li><h4>Física y gráficos realistas</h4>
59
- <p>Este es también un punto a favor de la utilización de CarX Drift Racing mod apk versión antigua. Disfrutará de la mejor experiencia de juego con física y gráficos realistas. Usted sentirá la emoción de la deriva en diferentes superficies, con impresionantes efectos y sonidos. </p></li>
60
- </ul>
61
- <h3> Contras</h3>
62
- <ul>
63
- <li><h4>No es compatible con algunos dispositivos</h4>
64
- <p>Este es uno de los principales inconvenientes de usar CarX Drift Racing mod apk versión antigua. Es posible que tenga algunos problemas de compatibilidad con algunos dispositivos, especialmente los más nuevos. Es posible que el juego no funcione sin problemas o que se bloquee en algunos dispositivos, o que no pueda instalarlo en absoluto. </p></li>
65
- <li><h4>Puede contener errores y problemas técnicos</h4>
66
-
67
- <li><h4>No actualizado con nuevas características y contenido</h4>
68
- <p>Este es también un punto menos de usar CarX Drift Racing mod apk versión antigua. Te perderás las nuevas características y contenido que ofrece la versión oficial del juego. Por ejemplo, no podrás jugar a los nuevos modos, coches, pistas, eventos o desafíos que los desarrolladores añaden al juego regularmente. </p></li>
69
- </ul>
70
- <h2>Conclusión</h2>
71
- <p>CarX Drift Racing es un juego de deriva divertido y realista que puedes jugar en tu dispositivo Android. Sin embargo, si quieres tener más libertad y diversión en el juego, se puede descargar la antigua versión de CarX Drift Racing mod apk. Este apk mod le dará dinero ilimitado y oro, coches desbloqueados y pistas, y la física realista y gráficos. Sin embargo, también debe ser consciente de los contras de usar este apk mod, tales como problemas de compatibilidad, errores y fallas, y la falta de actualizaciones. </p>
72
- <p>Si está interesado en descargar e instalar CarX Drift Racing mod apk versión antigua, puede seguir los pasos que proporcionamos en este artículo. También recomendamos que utilice una fuente confiable y segura para descargar el archivo apk, como APKPure. Esperamos que este artículo sea útil e informativo para usted. ¡Feliz deriva! </p>
73
- <h2>Preguntas frecuentes</h2>
74
- <p>Aquí hay algunas preguntas frecuentes sobre CarX Drift Racing mod apk versión antigua:</p>
75
- <ul>
76
- <li><b>Es CarX Drift Racing mod apk vieja versión segura de usar? </b></li>
77
- <p>Sí, CarX Drift Racing mod apk versión antigua es seguro de usar, siempre y cuando se descarga desde una fuente confiable y segura. Sin embargo, siempre debe tener cuidado al descargar e instalar cualquier juego o aplicación modificada en su dispositivo, ya que algunos de ellos pueden contener virus o malware que pueden dañar su dispositivo o robar sus datos. </p>
78
- <li><b>¿Puedo jugar CarX Drift Racing mod apk versión antigua en línea? </b></li>
79
-
80
- <li><b>¿Puedo actualizar la versión antigua de CarX Drift Racing mod apk? </b></li>
81
- <p>No, no se puede actualizar CarX Drift Racing mod apk versión antigua a la última versión del juego. Si intenta actualizarlo, perderá todas las características modificadas y los beneficios que tiene en la versión anterior. Por lo tanto, le aconsejamos que no lo actualice y disfrute de la versión anterior tal como está. </p>
82
- <li><b>¿Puedo usar CarX Drift Racing mod apk versión antigua con mi cuenta existente? </b></li>
83
- <p>No, no se puede utilizar CarX Drift Racing mod apk versión antigua con su cuenta existente. Si intentas hacerlo, puedes arriesgarte a perder tu cuenta o a que los desarrolladores del juego te prohíban acceder a ella. Por lo tanto, le recomendamos que cree una nueva cuenta o utilice una cuenta de invitado para jugar con este apk mod. </p>
84
- <li><b>¿Puedo desinstalar CarX Drift Racing mod apk versión antigua? </b></li>
85
- <p>Sí, puede desinstalar CarX Drift Racing mod apk versión antigua en cualquier momento que desee. Para ello, es necesario ir a la configuración de su dispositivo, a continuación, aplicaciones, a continuación, CarX Drift Racing. Toque en desinstalar y confirmar su elección. También puede eliminar el archivo apk del almacenamiento de su dispositivo si lo desea. </p>
86
- </ul></p> 64aa2da5cf<br />
87
- <br />
88
- <br />
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/easter.py DELETED
@@ -1,89 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- This module offers a generic Easter computing method for any given year, using
4
- Western, Orthodox or Julian algorithms.
5
- """
6
-
7
- import datetime
8
-
9
- __all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
10
-
11
- EASTER_JULIAN = 1
12
- EASTER_ORTHODOX = 2
13
- EASTER_WESTERN = 3
14
-
15
-
16
- def easter(year, method=EASTER_WESTERN):
17
- """
18
- This method was ported from the work done by GM Arts,
19
- on top of the algorithm by Claus Tondering, which was
20
- based in part on the algorithm of Ouding (1940), as
21
- quoted in "Explanatory Supplement to the Astronomical
22
- Almanac", P. Kenneth Seidelmann, editor.
23
-
24
- This algorithm implements three different Easter
25
- calculation methods:
26
-
27
- 1. Original calculation in Julian calendar, valid in
28
- dates after 326 AD
29
- 2. Original method, with date converted to Gregorian
30
- calendar, valid in years 1583 to 4099
31
- 3. Revised method, in Gregorian calendar, valid in
32
- years 1583 to 4099 as well
33
-
34
- These methods are represented by the constants:
35
-
36
- * ``EASTER_JULIAN = 1``
37
- * ``EASTER_ORTHODOX = 2``
38
- * ``EASTER_WESTERN = 3``
39
-
40
- The default method is method 3.
41
-
42
- More about the algorithm may be found at:
43
-
44
- `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
45
-
46
- and
47
-
48
- `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
49
-
50
- """
51
-
52
- if not (1 <= method <= 3):
53
- raise ValueError("invalid method")
54
-
55
- # g - Golden year - 1
56
- # c - Century
57
- # h - (23 - Epact) mod 30
58
- # i - Number of days from March 21 to Paschal Full Moon
59
- # j - Weekday for PFM (0=Sunday, etc)
60
- # p - Number of days from March 21 to Sunday on or before PFM
61
- # (-6 to 28 methods 1 & 3, to 56 for method 2)
62
- # e - Extra days to add for method 2 (converting Julian
63
- # date to Gregorian date)
64
-
65
- y = year
66
- g = y % 19
67
- e = 0
68
- if method < 3:
69
- # Old method
70
- i = (19*g + 15) % 30
71
- j = (y + y//4 + i) % 7
72
- if method == 2:
73
- # Extra dates to convert Julian to Gregorian date
74
- e = 10
75
- if y > 1600:
76
- e = e + y//100 - 16 - (y//100 - 16)//4
77
- else:
78
- # New method
79
- c = y//100
80
- h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
81
- i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
82
- j = (y + y//4 + i + 2 - c + c//4) % 7
83
-
84
- # p can be from -6 to 56 corresponding to dates 22 March to 23 May
85
- # (later dates apply to method 2, although 23 May never actually occurs)
86
- p = i - j + e
87
- d = 1 + (p + 27 + (p + 6)//40) % 31
88
- m = 3 + (p + 26)//30
89
- return datetime.date(int(y), int(m), int(d))
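A short usage sketch for the easter() function above. The only date asserted here is the Western Easter of 2024 (31 March); the other two calls simply illustrate the alternative methods.

```python
# Usage sketch for easter(): Western (default), Orthodox, and Julian calculations.
from dateutil.easter import easter, EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN

print(easter(2024))                     # 2024-03-31 -- Western / Gregorian, the default method
print(easter(2024, EASTER_ORTHODOX))    # Orthodox date, expressed on the Gregorian calendar
print(easter(2024, EASTER_JULIAN))      # date in the Julian calendar
```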
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/specifiers.py DELETED
@@ -1,802 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- import abc
6
- import functools
7
- import itertools
8
- import re
9
- import warnings
10
- from typing import (
11
- Callable,
12
- Dict,
13
- Iterable,
14
- Iterator,
15
- List,
16
- Optional,
17
- Pattern,
18
- Set,
19
- Tuple,
20
- TypeVar,
21
- Union,
22
- )
23
-
24
- from .utils import canonicalize_version
25
- from .version import LegacyVersion, Version, parse
26
-
27
- ParsedVersion = Union[Version, LegacyVersion]
28
- UnparsedVersion = Union[Version, LegacyVersion, str]
29
- VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
30
- CallableOperator = Callable[[ParsedVersion, str], bool]
31
-
32
-
33
- class InvalidSpecifier(ValueError):
34
- """
35
- An invalid specifier was found, users should refer to PEP 440.
36
- """
37
-
38
-
39
- class BaseSpecifier(metaclass=abc.ABCMeta):
40
- @abc.abstractmethod
41
- def __str__(self) -> str:
42
- """
43
- Returns the str representation of this Specifier like object. This
44
- should be representative of the Specifier itself.
45
- """
46
-
47
- @abc.abstractmethod
48
- def __hash__(self) -> int:
49
- """
50
- Returns a hash value for this Specifier like object.
51
- """
52
-
53
- @abc.abstractmethod
54
- def __eq__(self, other: object) -> bool:
55
- """
56
- Returns a boolean representing whether or not the two Specifier like
57
- objects are equal.
58
- """
59
-
60
- @abc.abstractproperty
61
- def prereleases(self) -> Optional[bool]:
62
- """
63
- Returns whether or not pre-releases as a whole are allowed by this
64
- specifier.
65
- """
66
-
67
- @prereleases.setter
68
- def prereleases(self, value: bool) -> None:
69
- """
70
- Sets whether or not pre-releases as a whole are allowed by this
71
- specifier.
72
- """
73
-
74
- @abc.abstractmethod
75
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
76
- """
77
- Determines if the given item is contained within this specifier.
78
- """
79
-
80
- @abc.abstractmethod
81
- def filter(
82
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
83
- ) -> Iterable[VersionTypeVar]:
84
- """
85
- Takes an iterable of items and filters them so that only items which
86
- are contained within this specifier are allowed in it.
87
- """
88
-
89
-
90
- class _IndividualSpecifier(BaseSpecifier):
91
-
92
- _operators: Dict[str, str] = {}
93
- _regex: Pattern[str]
94
-
95
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
96
- match = self._regex.search(spec)
97
- if not match:
98
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
99
-
100
- self._spec: Tuple[str, str] = (
101
- match.group("operator").strip(),
102
- match.group("version").strip(),
103
- )
104
-
105
- # Store whether or not this Specifier should accept prereleases
106
- self._prereleases = prereleases
107
-
108
- def __repr__(self) -> str:
109
- pre = (
110
- f", prereleases={self.prereleases!r}"
111
- if self._prereleases is not None
112
- else ""
113
- )
114
-
115
- return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
116
-
117
- def __str__(self) -> str:
118
- return "{}{}".format(*self._spec)
119
-
120
- @property
121
- def _canonical_spec(self) -> Tuple[str, str]:
122
- return self._spec[0], canonicalize_version(self._spec[1])
123
-
124
- def __hash__(self) -> int:
125
- return hash(self._canonical_spec)
126
-
127
- def __eq__(self, other: object) -> bool:
128
- if isinstance(other, str):
129
- try:
130
- other = self.__class__(str(other))
131
- except InvalidSpecifier:
132
- return NotImplemented
133
- elif not isinstance(other, self.__class__):
134
- return NotImplemented
135
-
136
- return self._canonical_spec == other._canonical_spec
137
-
138
- def _get_operator(self, op: str) -> CallableOperator:
139
- operator_callable: CallableOperator = getattr(
140
- self, f"_compare_{self._operators[op]}"
141
- )
142
- return operator_callable
143
-
144
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
145
- if not isinstance(version, (LegacyVersion, Version)):
146
- version = parse(version)
147
- return version
148
-
149
- @property
150
- def operator(self) -> str:
151
- return self._spec[0]
152
-
153
- @property
154
- def version(self) -> str:
155
- return self._spec[1]
156
-
157
- @property
158
- def prereleases(self) -> Optional[bool]:
159
- return self._prereleases
160
-
161
- @prereleases.setter
162
- def prereleases(self, value: bool) -> None:
163
- self._prereleases = value
164
-
165
- def __contains__(self, item: str) -> bool:
166
- return self.contains(item)
167
-
168
- def contains(
169
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
170
- ) -> bool:
171
-
172
- # Determine if prereleases are to be allowed or not.
173
- if prereleases is None:
174
- prereleases = self.prereleases
175
-
176
- # Normalize item to a Version or LegacyVersion, this allows us to have
177
- # a shortcut for ``"2.0" in Specifier(">=2")
178
- normalized_item = self._coerce_version(item)
179
-
180
- # Determine if we should be supporting prereleases in this specifier
181
- # or not, if we do not support prereleases than we can short circuit
182
- # logic if this version is a prereleases.
183
- if normalized_item.is_prerelease and not prereleases:
184
- return False
185
-
186
- # Actually do the comparison to determine if this item is contained
187
- # within this Specifier or not.
188
- operator_callable: CallableOperator = self._get_operator(self.operator)
189
- return operator_callable(normalized_item, self.version)
190
-
191
- def filter(
192
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
193
- ) -> Iterable[VersionTypeVar]:
194
-
195
- yielded = False
196
- found_prereleases = []
197
-
198
- kw = {"prereleases": prereleases if prereleases is not None else True}
199
-
200
- # Attempt to iterate over all the values in the iterable and if any of
201
- # them match, yield them.
202
- for version in iterable:
203
- parsed_version = self._coerce_version(version)
204
-
205
- if self.contains(parsed_version, **kw):
206
- # If our version is a prerelease, and we were not set to allow
207
- # prereleases, then we'll store it for later in case nothing
208
- # else matches this specifier.
209
- if parsed_version.is_prerelease and not (
210
- prereleases or self.prereleases
211
- ):
212
- found_prereleases.append(version)
213
- # Either this is not a prerelease, or we should have been
214
- # accepting prereleases from the beginning.
215
- else:
216
- yielded = True
217
- yield version
218
-
219
- # Now that we've iterated over everything, determine if we've yielded
220
- # any values, and if we have not and we have any prereleases stored up
221
- # then we will go ahead and yield the prereleases.
222
- if not yielded and found_prereleases:
223
- for version in found_prereleases:
224
- yield version
225
-
226
-
227
- class LegacySpecifier(_IndividualSpecifier):
228
-
229
- _regex_str = r"""
230
- (?P<operator>(==|!=|<=|>=|<|>))
231
- \s*
232
- (?P<version>
233
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
234
- # string can be just about anything, we match everything
235
- # except for whitespace, a semi-colon for marker support,
236
- # a closing paren since versions can be enclosed in
237
- # them, and a comma since it's a version separator.
238
- )
239
- """
240
-
241
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
242
-
243
- _operators = {
244
- "==": "equal",
245
- "!=": "not_equal",
246
- "<=": "less_than_equal",
247
- ">=": "greater_than_equal",
248
- "<": "less_than",
249
- ">": "greater_than",
250
- }
251
-
252
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
253
- super().__init__(spec, prereleases)
254
-
255
- warnings.warn(
256
- "Creating a LegacyVersion has been deprecated and will be "
257
- "removed in the next major release",
258
- DeprecationWarning,
259
- )
260
-
261
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
262
- if not isinstance(version, LegacyVersion):
263
- version = LegacyVersion(str(version))
264
- return version
265
-
266
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
267
- return prospective == self._coerce_version(spec)
268
-
269
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
270
- return prospective != self._coerce_version(spec)
271
-
272
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
273
- return prospective <= self._coerce_version(spec)
274
-
275
- def _compare_greater_than_equal(
276
- self, prospective: LegacyVersion, spec: str
277
- ) -> bool:
278
- return prospective >= self._coerce_version(spec)
279
-
280
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
281
- return prospective < self._coerce_version(spec)
282
-
283
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
284
- return prospective > self._coerce_version(spec)
285
-
286
-
287
- def _require_version_compare(
288
- fn: Callable[["Specifier", ParsedVersion, str], bool]
289
- ) -> Callable[["Specifier", ParsedVersion, str], bool]:
290
- @functools.wraps(fn)
291
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
292
- if not isinstance(prospective, Version):
293
- return False
294
- return fn(self, prospective, spec)
295
-
296
- return wrapped
297
-
298
-
299
- class Specifier(_IndividualSpecifier):
300
-
301
- _regex_str = r"""
302
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
303
- (?P<version>
304
- (?:
305
- # The identity operators allow for an escape hatch that will
306
- # do an exact string match of the version you wish to install.
307
- # This will not be parsed by PEP 440 and we cannot determine
308
- # any semantic meaning from it. This operator is discouraged
309
- # but included entirely as an escape hatch.
310
- (?<====) # Only match for the identity operator
311
- \s*
312
- [^\s]* # We just match everything, except for whitespace
313
- # since we are only testing for strict identity.
314
- )
315
- |
316
- (?:
317
- # The (non)equality operators allow for wild card and local
318
- # versions to be specified so we have to define these two
319
- # operators separately to enable that.
320
- (?<===|!=) # Only match for equals and not equals
321
-
322
- \s*
323
- v?
324
- (?:[0-9]+!)? # epoch
325
- [0-9]+(?:\.[0-9]+)* # release
326
- (?: # pre release
327
- [-_\.]?
328
- (a|b|c|rc|alpha|beta|pre|preview)
329
- [-_\.]?
330
- [0-9]*
331
- )?
332
- (?: # post release
333
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
334
- )?
335
-
336
- # You cannot use a wild card and a dev or local version
337
- # together so group them with a | and make them optional.
338
- (?:
339
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
340
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
341
- |
342
- \.\* # Wild card syntax of .*
343
- )?
344
- )
345
- |
346
- (?:
347
- # The compatible operator requires at least two digits in the
348
- # release segment.
349
- (?<=~=) # Only match for the compatible operator
350
-
351
- \s*
352
- v?
353
- (?:[0-9]+!)? # epoch
354
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
355
- (?: # pre release
356
- [-_\.]?
357
- (a|b|c|rc|alpha|beta|pre|preview)
358
- [-_\.]?
359
- [0-9]*
360
- )?
361
- (?: # post release
362
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
363
- )?
364
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
365
- )
366
- |
367
- (?:
368
- # All other operators only allow a sub set of what the
369
- # (non)equality operators do. Specifically they do not allow
370
- # local versions to be specified nor do they allow the prefix
371
- # matching wild cards.
372
- (?<!==|!=|~=) # We have special cases for these
373
- # operators so we want to make sure they
374
- # don't match here.
375
-
376
- \s*
377
- v?
378
- (?:[0-9]+!)? # epoch
379
- [0-9]+(?:\.[0-9]+)* # release
380
- (?: # pre release
381
- [-_\.]?
382
- (a|b|c|rc|alpha|beta|pre|preview)
383
- [-_\.]?
384
- [0-9]*
385
- )?
386
- (?: # post release
387
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
388
- )?
389
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
390
- )
391
- )
392
- """
393
-
394
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
395
-
396
- _operators = {
397
- "~=": "compatible",
398
- "==": "equal",
399
- "!=": "not_equal",
400
- "<=": "less_than_equal",
401
- ">=": "greater_than_equal",
402
- "<": "less_than",
403
- ">": "greater_than",
404
- "===": "arbitrary",
405
- }
406
-
407
- @_require_version_compare
408
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
409
-
410
- # Compatible releases have an equivalent combination of >= and ==. That
411
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
412
- # implement this in terms of the other specifiers instead of
413
- # implementing it ourselves. The only thing we need to do is construct
414
- # the other specifiers.
415
-
416
- # We want everything but the last item in the version, but we want to
417
- # ignore suffix segments.
418
- prefix = ".".join(
419
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
420
- )
421
-
422
- # Add the prefix notation to the end of our string
423
- prefix += ".*"
424
-
425
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
426
- prospective, prefix
427
- )
428
-
429
- @_require_version_compare
430
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
431
-
432
- # We need special logic to handle prefix matching
433
- if spec.endswith(".*"):
434
- # In the case of prefix matching we want to ignore local segment.
435
- prospective = Version(prospective.public)
436
- # Split the spec out by dots, and pretend that there is an implicit
437
- # dot in between a release segment and a pre-release segment.
438
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
439
-
440
- # Split the prospective version out by dots, and pretend that there
441
- # is an implicit dot in between a release segment and a pre-release
442
- # segment.
443
- split_prospective = _version_split(str(prospective))
444
-
445
- # Shorten the prospective version to be the same length as the spec
446
- # so that we can determine if the specifier is a prefix of the
447
- # prospective version or not.
448
- shortened_prospective = split_prospective[: len(split_spec)]
449
-
450
- # Pad out our two sides with zeros so that they both equal the same
451
- # length.
452
- padded_spec, padded_prospective = _pad_version(
453
- split_spec, shortened_prospective
454
- )
455
-
456
- return padded_prospective == padded_spec
457
- else:
458
- # Convert our spec string into a Version
459
- spec_version = Version(spec)
460
-
461
- # If the specifier does not have a local segment, then we want to
462
- # act as if the prospective version also does not have a local
463
- # segment.
464
- if not spec_version.local:
465
- prospective = Version(prospective.public)
466
-
467
- return prospective == spec_version
468
-
469
- @_require_version_compare
470
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
471
- return not self._compare_equal(prospective, spec)
472
-
473
- @_require_version_compare
474
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
475
-
476
- # NB: Local version identifiers are NOT permitted in the version
477
- # specifier, so local version labels can be universally removed from
478
- # the prospective version.
479
- return Version(prospective.public) <= Version(spec)
480
-
481
- @_require_version_compare
482
- def _compare_greater_than_equal(
483
- self, prospective: ParsedVersion, spec: str
484
- ) -> bool:
485
-
486
- # NB: Local version identifiers are NOT permitted in the version
487
- # specifier, so local version labels can be universally removed from
488
- # the prospective version.
489
- return Version(prospective.public) >= Version(spec)
490
-
491
- @_require_version_compare
492
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
493
-
494
- # Convert our spec to a Version instance, since we'll want to work with
495
- # it as a version.
496
- spec = Version(spec_str)
497
-
498
- # Check to see if the prospective version is less than the spec
499
- # version. If it's not we can short circuit and just return False now
500
- # instead of doing extra unneeded work.
501
- if not prospective < spec:
502
- return False
503
-
504
- # This special case is here so that, unless the specifier itself
505
- # is a pre-release version, we do not accept pre-release
506
- # versions for the version mentioned in the specifier (e.g. <3.1 should
507
- # not match 3.1.dev0, but should match 3.0.dev0).
508
- if not spec.is_prerelease and prospective.is_prerelease:
509
- if Version(prospective.base_version) == Version(spec.base_version):
510
- return False
511
-
512
- # If we've gotten to here, it means that prospective version is both
513
- # less than the spec version *and* it's not a pre-release of the same
514
- # version in the spec.
515
- return True
516
-
517
- @_require_version_compare
518
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
519
-
520
- # Convert our spec to a Version instance, since we'll want to work with
521
- # it as a version.
522
- spec = Version(spec_str)
523
-
524
- # Check to see if the prospective version is greater than the spec
525
- # version. If it's not we can short circuit and just return False now
526
- # instead of doing extra unneeded work.
527
- if not prospective > spec:
528
- return False
529
-
530
- # This special case is here so that, unless the specifier itself
531
- # is a post-release version, we do not accept
532
- # post-release versions for the version mentioned in the specifier
533
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
534
- if not spec.is_postrelease and prospective.is_postrelease:
535
- if Version(prospective.base_version) == Version(spec.base_version):
536
- return False
537
-
538
- # Ensure that we do not allow a local version of the version mentioned
539
- # in the specifier, which is technically greater than, to match.
540
- if prospective.local is not None:
541
- if Version(prospective.base_version) == Version(spec.base_version):
542
- return False
543
-
544
- # If we've gotten to here, it means that prospective version is both
545
- # greater than the spec version *and* it's not a post-release or local version of the
546
- # same version in the spec.
547
- return True
548
-
549
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
550
- return str(prospective).lower() == str(spec).lower()
551
-
552
- @property
553
- def prereleases(self) -> bool:
554
-
555
- # If there is an explicit prereleases set for this, then we'll just
556
- # blindly use that.
557
- if self._prereleases is not None:
558
- return self._prereleases
559
-
560
- # Look at all of our specifiers and determine if they are inclusive
561
- # operators, and if they are if they are including an explicit
562
- # prerelease.
563
- operator, version = self._spec
564
- if operator in ["==", ">=", "<=", "~=", "==="]:
565
- # The == specifier can include a trailing .*, if it does we
566
- # want to remove before parsing.
567
- if operator == "==" and version.endswith(".*"):
568
- version = version[:-2]
569
-
570
- # Parse the version, and if it is a pre-release than this
571
- # specifier allows pre-releases.
572
- if parse(version).is_prerelease:
573
- return True
574
-
575
- return False
576
-
577
- @prereleases.setter
578
- def prereleases(self, value: bool) -> None:
579
- self._prereleases = value
580
-
581
-
582
- _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
583
-
584
-
585
- def _version_split(version: str) -> List[str]:
586
- result: List[str] = []
587
- for item in version.split("."):
588
- match = _prefix_regex.search(item)
589
- if match:
590
- result.extend(match.groups())
591
- else:
592
- result.append(item)
593
- return result
594
-
595
-
596
- def _is_not_suffix(segment: str) -> bool:
597
- return not any(
598
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
599
- )
600
-
601
-
602
- def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
603
- left_split, right_split = [], []
604
-
605
- # Get the release segment of our versions
606
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
607
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
608
-
609
- # Get the rest of our versions
610
- left_split.append(left[len(left_split[0]) :])
611
- right_split.append(right[len(right_split[0]) :])
612
-
613
- # Insert our padding
614
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
615
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
616
-
617
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
618
-
619
-
620
- class SpecifierSet(BaseSpecifier):
621
- def __init__(
622
- self, specifiers: str = "", prereleases: Optional[bool] = None
623
- ) -> None:
624
-
625
- # Split on , to break each individual specifier into its own item, and
626
- # strip each item to remove leading/trailing whitespace.
627
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
628
-
629
- # Parsed each individual specifier, attempting first to make it a
630
- # Specifier and falling back to a LegacySpecifier.
631
- parsed: Set[_IndividualSpecifier] = set()
632
- for specifier in split_specifiers:
633
- try:
634
- parsed.add(Specifier(specifier))
635
- except InvalidSpecifier:
636
- parsed.add(LegacySpecifier(specifier))
637
-
638
- # Turn our parsed specifiers into a frozen set and save them for later.
639
- self._specs = frozenset(parsed)
640
-
641
- # Store our prereleases value so we can use it later to determine if
642
- # we accept prereleases or not.
643
- self._prereleases = prereleases
644
-
645
- def __repr__(self) -> str:
646
- pre = (
647
- f", prereleases={self.prereleases!r}"
648
- if self._prereleases is not None
649
- else ""
650
- )
651
-
652
- return f"<SpecifierSet({str(self)!r}{pre})>"
653
-
654
- def __str__(self) -> str:
655
- return ",".join(sorted(str(s) for s in self._specs))
656
-
657
- def __hash__(self) -> int:
658
- return hash(self._specs)
659
-
660
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
661
- if isinstance(other, str):
662
- other = SpecifierSet(other)
663
- elif not isinstance(other, SpecifierSet):
664
- return NotImplemented
665
-
666
- specifier = SpecifierSet()
667
- specifier._specs = frozenset(self._specs | other._specs)
668
-
669
- if self._prereleases is None and other._prereleases is not None:
670
- specifier._prereleases = other._prereleases
671
- elif self._prereleases is not None and other._prereleases is None:
672
- specifier._prereleases = self._prereleases
673
- elif self._prereleases == other._prereleases:
674
- specifier._prereleases = self._prereleases
675
- else:
676
- raise ValueError(
677
- "Cannot combine SpecifierSets with True and False prerelease "
678
- "overrides."
679
- )
680
-
681
- return specifier
682
-
683
- def __eq__(self, other: object) -> bool:
684
- if isinstance(other, (str, _IndividualSpecifier)):
685
- other = SpecifierSet(str(other))
686
- elif not isinstance(other, SpecifierSet):
687
- return NotImplemented
688
-
689
- return self._specs == other._specs
690
-
691
- def __len__(self) -> int:
692
- return len(self._specs)
693
-
694
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
695
- return iter(self._specs)
696
-
697
- @property
698
- def prereleases(self) -> Optional[bool]:
699
-
700
- # If we have been given an explicit prerelease modifier, then we'll
701
- # pass that through here.
702
- if self._prereleases is not None:
703
- return self._prereleases
704
-
705
- # If we don't have any specifiers, and we don't have a forced value,
706
- # then we'll just return None since we don't know if this should have
707
- # pre-releases or not.
708
- if not self._specs:
709
- return None
710
-
711
- # Otherwise we'll see if any of the given specifiers accept
712
- # prereleases, if any of them do we'll return True, otherwise False.
713
- return any(s.prereleases for s in self._specs)
714
-
715
- @prereleases.setter
716
- def prereleases(self, value: bool) -> None:
717
- self._prereleases = value
718
-
719
- def __contains__(self, item: UnparsedVersion) -> bool:
720
- return self.contains(item)
721
-
722
- def contains(
723
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
724
- ) -> bool:
725
-
726
- # Ensure that our item is a Version or LegacyVersion instance.
727
- if not isinstance(item, (LegacyVersion, Version)):
728
- item = parse(item)
729
-
730
- # Determine if we're forcing a prerelease or not, if we're not forcing
731
- # one for this particular filter call, then we'll use whatever the
732
- # SpecifierSet thinks for whether or not we should support prereleases.
733
- if prereleases is None:
734
- prereleases = self.prereleases
735
-
736
- # We can determine if we're going to allow pre-releases by looking to
737
- # see if any of the underlying items supports them. If none of them do
738
- # and this item is a pre-release then we do not allow it and we can
739
- # short circuit that here.
740
- # Note: This means that 1.0.dev1 would not be contained in something
741
- # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
742
- if not prereleases and item.is_prerelease:
743
- return False
744
-
745
- # We simply dispatch to the underlying specs here to make sure that the
746
- # given version is contained within all of them.
747
- # Note: This use of all() here means that an empty set of specifiers
748
- # will always return True, this is an explicit design decision.
749
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
750
-
751
- def filter(
752
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
753
- ) -> Iterable[VersionTypeVar]:
754
-
755
- # Determine if we're forcing a prerelease or not, if we're not forcing
756
- # one for this particular filter call, then we'll use whatever the
757
- # SpecifierSet thinks for whether or not we should support prereleases.
758
- if prereleases is None:
759
- prereleases = self.prereleases
760
-
761
- # If we have any specifiers, then we want to wrap our iterable in the
762
- # filter method for each one, this will act as a logical AND amongst
763
- # each specifier.
764
- if self._specs:
765
- for spec in self._specs:
766
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
767
- return iterable
768
- # If we do not have any specifiers, then we need to have a rough filter
769
- # which will filter out any pre-releases, unless there are no final
770
- # releases, and which will filter out LegacyVersion in general.
771
- else:
772
- filtered: List[VersionTypeVar] = []
773
- found_prereleases: List[VersionTypeVar] = []
774
-
775
- item: UnparsedVersion
776
- parsed_version: Union[Version, LegacyVersion]
777
-
778
- for item in iterable:
779
- # Ensure that we have some kind of Version class for this item.
780
- if not isinstance(item, (LegacyVersion, Version)):
781
- parsed_version = parse(item)
782
- else:
783
- parsed_version = item
784
-
785
- # Filter out any item which is parsed as a LegacyVersion
786
- if isinstance(parsed_version, LegacyVersion):
787
- continue
788
-
789
- # Store any item which is a pre-release for later unless we've
790
- # already found a final version or we are accepting prereleases
791
- if parsed_version.is_prerelease and not prereleases:
792
- if not filtered:
793
- found_prereleases.append(item)
794
- else:
795
- filtered.append(item)
796
-
797
- # If we've found no items except for pre-releases, then we'll go
798
- # ahead and use the pre-releases
799
- if not filtered and found_prereleases and prereleases is None:
800
- return found_prereleases
801
-
802
- return filtered
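
A quick usage sketch of the semantics implemented above (compatible release, prefix matching, and pre-release handling). It assumes the standalone `packaging` distribution, whose `specifiers` module mirrors the vendored copy deleted here; the printed results reflect what the code above computes, not output captured from this repo.

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=1.0,<2.0")

# contains() ANDs every individual specifier together.
print(Version("1.4") in spec)                       # True
print(Version("2.1") in spec)                       # False

# Pre-releases are rejected unless explicitly allowed (or implied by the spec).
print(spec.contains("1.5rc1"))                      # False
print(spec.contains("1.5rc1", prereleases=True))    # True

# ~=2.2 is handled as >=2.2 combined with the prefix match ==2.*
print(Version("2.5") in SpecifierSet("~=2.2"))      # True
print(Version("3.0") in SpecifierSet("~=2.2"))      # False

# filter() keeps matching items, falling back to pre-releases only when
# no final release qualifies.
print(list(spec.filter(["0.9", "1.2", "1.9.post1", "2.0"])))  # ['1.2', '1.9.post1']
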
spaces/BraydenMoore/MARCI-NFL-Betting/Templates/index.html DELETED
@@ -1,724 +0,0 @@
1
- <!DOCTYPE html>
2
- <html>
3
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
4
- <head>
5
- <link rel="shortcut icon" type="image/x-icon" href="https://images.squarespace-cdn.com/content/v1/64790f5777b5d772678cce83/6d71eaee-f825-4324-be9b-2def32469eac/favicon.ico?format=100w">
6
- <title>MARCI - NFL Betting</title>
7
- </head>
8
- <style>
9
- body {
10
- max-width: 90vw;
11
- margin: auto;
12
- background-color: black;
13
- font-family: 'Helvetica';
14
- justify-content: center;
15
- text-align: center;
16
- padding-top: 2%;
17
- padding-bottom: 5%;
18
- }
19
- p {
20
- color: #f2f2f2;
21
- }
22
- h1 {
23
- color: #f2f2f2;
24
- margin-top: 40px;
25
- margin-bottom: 10px;
26
- font-size: xxx-large;
27
- }
28
- h2 {
29
- margin-top: 0px;
30
- color: #f2f2f2;
31
- }
32
- h3 {
33
- color: #f2f2f2;
34
- margin: 0px;
35
- }
36
-
37
- table {
38
- transition: 0.3s ease;
39
- margin-top: 20px;
40
- width: 80%;
41
- border-collapse: collapse;
42
- text-align: center;
43
- }
44
- .table-div {
45
- display: flex;
46
- justify-content: center;
47
- }
48
- th, td {
49
- color: #f2f2f2;
50
- border: 1px solid black;
51
- text-align: center;
52
- padding: 8px;
53
- }
54
- th {
55
- background-color: black;
56
- }
57
- tr {
58
- background-color: black;
59
- }
60
- tr:nth-child(even) {
61
- background-color: rgb(10, 10, 5);
62
- }
63
- td img {
64
- display: block;
65
- margin: auto;
66
- }
67
- input[type="text"] {
68
- font-size: 12pt;
69
- width: 45px;
70
- height: 30px;
71
- text-align: center;
72
- background-color: transparent;
73
- border-radius: 5px;
74
- transition: 0.3s ease;
75
- color: #f2f2f2;
76
- border: none;
77
- }
78
-
79
- input[type="text"]:hover {
80
- background-color:rgb(30, 30, 30);
81
- }
82
- button {
83
- font-size: 12pt;
84
- background-color: rgb(30, 30, 30);
85
- color: #ffffff;
86
- padding: 10px 20px;
87
- border: none;
88
- border-radius: 5px;
89
- margin-top: 40px;
90
- width: 80%;
91
- transition: all 0.3s ease;
92
- }
93
- button:hover {
94
- color: rgb(0, 0, 0);
95
- background-color: rgb(255, 255, 255);
96
- cursor: pointer;
97
- }
98
- .winner-wrapper {
99
- cursor: default;
100
- position: relative;
101
- width: 100%;
102
- text-align: center;
103
- display: flex;
104
- justify-content: center;
105
- align-items: center;
106
- transition: 0.3s ease;
107
- }
108
- .winner-image {
109
- height: auto;
110
- margin: 0;
111
- transition: 0.3s ease;
112
- }
113
-
114
- .over-under-wrapper {
115
- cursor: default;
116
- position: relative;
117
- width: 100%;
118
- height: 50px;
119
- display: flex;
120
- align-items: center;
121
- justify-content: center;
122
- transition: 0.3s ease;
123
- }
124
- .over-under-text {
125
- display: inline-block;
126
- margin: 0;
127
- margin-right: 2px;
128
- font-weight: bold;
129
- }
130
- .over {
131
- color: rgb(255, 255, 255);
132
- }
133
- .under {
134
- color: rgb(255, 255, 255);
135
- }
136
- .na {
137
- color: white;
138
- }
139
-
140
- .highlight {
141
- background: rgb(30, 30, 30) !important;
142
- border: 2px solid rgb(30, 30, 30) !important;
143
- border-radius: 10px !important;
144
- }
145
-
146
- .force-repaint { transform: translateZ(0); }
147
-
148
- .hidden {
149
- opacity: 0;
150
- }
151
-
152
- .section-container {
153
- display: flex;
154
- justify-content: space-between;
155
- }
156
-
157
- .section {
158
- padding: 30px;
159
- text-align: left;
160
- border-style: solid;
161
- border-width: 1px;
162
- border-color: rgb(61, 61, 61);
163
- width: 48%;
164
- }
165
-
166
- .content {
167
- width: 100%;
168
- }
169
-
170
- .content img {
171
- width: 100%;
172
- height: auto;
173
- margin-top: 20px;
174
- margin-bottom: 20px;
175
- }
176
-
177
- .divider {
178
- border: 0;
179
- height: 1px;
180
- background: rgb(61, 61, 61);
181
- margin-top: 50px;
182
- margin-bottom: 50px;
183
- width: 80%;
184
- }
185
-
186
- .label {
187
- color: rgb(114, 114, 114);
188
- }
189
-
190
- .info {
191
- color: white;
192
- }
193
-
194
- a {
195
- color: white;
196
- }
197
-
198
- .scroll-banner {
199
- position: fixed;
200
- top: 0;
201
- left: 0;
202
- width: 100%;
203
- z-index: 999;
204
- width: 100%;
205
- display: flex;
206
- align-items: center;
207
- height: 30px;
208
- background-color: green;
209
- overflow: hidden;
210
- visibility: hidden;
211
- }
212
-
213
- .scroll-text {
214
- font-family: 'Helvetica';
215
- color: white;
216
- display: inline-block;
217
- animation: scrolling 10s linear infinite;
218
- white-space: nowrap;
219
- }
220
-
221
- @keyframes scrolling {
222
- 0% { transform: translateX(100vw); }
223
- 100% { transform: translateX(-100%); }
224
- }
225
-
226
- .emoji {
227
- margin-left: 5px;
228
- color: rgb(255, 255, 255);
229
- transition: 0.3s ease;
230
- }
231
-
232
- .spinner {
233
- margin: auto;
234
- display: block;
235
- border: 2px solid transparent;
236
- border-radius: 50%;
237
- border-top: 2px solid #6a6a6a;
238
- width: 16px;
239
- height: 16px;
240
- animation: spin 1s linear infinite;
241
- }
242
-
243
- #gradient {
244
- background: red;
245
- background: -webkit-linear-gradient(left, orange , yellow, green, cyan, blue, violet);
246
- background: -o-linear-gradient(right, orange, yellow, green, cyan, blue, violet);
247
- background: -moz-linear-gradient(right, orange, yellow, green, cyan, blue, violet);
248
- background: linear-gradient(to right, orange , yellow, green, cyan, blue, violet);
249
- background-clip: text;
250
- -webkit-background-clip: text;
251
- -webkit-text-fill-color: transparent;
252
- font-weight: bold;
253
- }
254
-
255
- .modelDetails {
256
- width: 80%;
257
- display: inline-block;
258
- margin-bottom: 40px;
259
- }
260
-
261
- #weekSelector {
262
- transition: 0.3s ease;
263
- border-radius: 10px;
264
- padding: 5px;
265
- color: white;
266
- background: rgb(30, 30, 30) !important;
267
- font-family: Arial, Helvetica, sans-serif;
268
- }
269
- #weekSelector:hover {
270
- opacity: 0.5;
271
- }
272
-
273
- @keyframes spin {
274
- 0% {
275
- transform: rotate(0deg);
276
- }
277
- 100% {
278
- transform: rotate(360deg);
279
- }
280
- }
281
-
282
- @media screen and (max-width: 768px) {
283
- .table-div {
284
- display: block;
285
- justify-content: center;
286
- }
287
- .winner-image {
288
- margin: 0;
289
- }
290
- .emoji {
291
- margin: 0;
292
- }
293
- .table-div{
294
- overflow-x: scroll;
295
- }
296
- .divider {
297
- width: 90%;
298
- }
299
- #modelDetails {
300
- width: 90%;
301
- }
302
- .button {
303
- width: 90%;
304
- }
305
- .section-container {
306
- display: inline;
307
- }
308
- .section {
309
- padding: 15px;
310
- width: auto;
311
- }
312
- }
313
-
314
- </style>
315
-
316
- <div class="scroll-banner">
317
- <div class="scroll-text">
318
- Predictions will begin at the conclusion of Week 1. Bet at your own risk. Know your limits. And most importantly, have fun!
319
- </div>
320
- </div>
321
-
322
- <body>
323
- <h1>M A R C I</h1>
324
- <div class="info">
325
- <span class="label"><i>Moore's Algorithm for Risky Capital Investments</i></span><br><br>
326
-
327
- <span id="gradient">Remember to have fun!</span><br><br>
328
-
329
- <span class="label"><b>Record through {{ latest_game }}</b></span><br>
330
- <span class="label">Winners:</span> {{ winners_correct }}-{{winners_incorrect}}{{winners_tie}}<span class="label"> ({{ winners_return }})</span><br>
331
- <span class="label">Over/Unders:</span> {{over_unders_correct}}-{{over_unders_incorrect}}{{over_unders_push}}<span class="label"> ({{over_unders_return}})</span><br><br>
332
- </div>
333
-
334
- <select id="weekSelector">
335
- </select>
336
-
337
- <div class="table-div">
338
- <table id="gameTable">
339
- <tr>
340
- <th>Date</th>
341
- <th>Away</th>
342
- <th>Home</th>
343
- <th>O/U</th>
344
- <th>Predicted Winner</th>
345
- <th>Predicted O/U</th>
346
- </tr>
347
- </table>
348
- </div>
349
- <button id="submitButton">
350
- Predict
351
- </button>
352
-
353
- <hr class="divider">
354
-
355
- <div class="modelDetails">
356
- <h2>Model Train/Test Details</h2>
357
- <div class="section-container">
358
- <div class="section">
359
- <h3>Moneyline</h3>
360
- <div class="info"></h3><span class="label">Test Accuracy:</span> 71.4%<br></div>
361
- <div class="content">
362
- <img src="/Static/xgboost_ML_no_odds_71.4%25_dark.png" alt="Moneyline Model">
363
- <div class="info">
364
- <span class="label">Model:</span> XGBoost<br>
365
- <span class="label">Train/Test Split:</span> 1782/199<br>
366
- <span class="label">Max Depth:</span> 2<br>
367
- <span class="label">Learning Rate:</span> 0.01<br>
368
- <span class="label">Epochs:</span> 500
369
- </div>
370
- </div>
371
- </div>
372
- <div class="section">
373
- <h3>Over/Under</h3>
374
- <div class="content">
375
- <div class="info"></h3><span class="label">Test Accuracy:</span> 59.8%<br></div>
376
- <img src="/Static/xgboost_OU_no_odds_59.8%25_dark.png" alt="Over/Under Model">
377
- <div class="info">
378
- <span class="label">Model:</span> XGBoost<br>
379
- <span class="label">Train/Test Split:</span> 1782/199<br>
380
- <span class="label">Max Depth:</span> 6<br>
381
- <span class="label">Learning Rate:</span> 0.05<br>
382
- <span class="label">Epochs:</span> 300
383
- </div>
384
- </div>
385
- </div>
386
- </div>
387
- </div>
388
-
389
- <div class="modelDetails">
390
- <h2>Predictive Accuracy This Year</h2>
391
- <div class="section-container">
392
- <div class="section">
393
- <h3>Moneyline</h3>
394
- <div class="info">{{ winners_return }}.</div>
395
- <div class="content">
396
- <img src="/Static/Winner_Predictions_dark.png" alt="Moneyline Accuracy">
397
- </div>
398
- <div class="info"><span class="label">{{ winners_binom }}</span><br></div>
399
-
400
- </div>
401
- <div class="section">
402
- <h3>Over/Under</h3>
403
- <div class="info">{{ over_unders_return }}.</div>
404
- <div class="content">
405
- <img src="/Static/Over_Under_Predictions_dark.png" alt="Over/Under Model">
406
- </div>
407
- <div class="info"><span class="label">{{ over_unders_binom }}</span><br></div>
408
- </div>
409
- </div>
410
- </div>
411
-
412
- <p>🤗<a href="https://huggingface.co/spaces/BraydenMoore/MARCI-NFL-Betting/tree/main">See the Code</a></p>
413
-
414
-
415
-
416
-
417
- <script>
418
- async function fetchGames(selectedWeek) {
419
- const response = await fetch(`/get_games?week=${selectedWeek}`);
420
- const pulled_games = await response.json();
421
- const table = document.getElementById('gameTable');
422
-
423
- for(let i = table.rows.length - 1; i > 0; i--) {
424
- table.deleteRow(i);
425
- }
426
-
427
- const columns = ['Date','Away Team', 'Home Team'];
428
- let lines;
429
- try {
430
- const lines_response = await fetch('/get_lines');
431
- if (!lines_response.ok) {
432
- throw new Error(`HTTP error! status: ${lines_response.status}`);
433
- }
434
- lines = await lines_response.json();
435
- }
436
- catch (error) {
437
- lines = new Array(20).fill(0);
438
- }
439
-
440
- pulled_games.forEach((game, index) => {
441
- const row = table.insertRow(-1);
442
-
443
- columns.forEach((column) => {
444
- const cell = row.insertCell(-1);
445
- if (column === 'Away Team' || column === 'Home Team') {
446
- const img = document.createElement('img');
447
- img.src = `/Static/${game[column]}.webp`;
448
- img.alt = game[column];
449
- img.width = 50;
450
- cell.appendChild(img);
451
- } else {
452
- cell.textContent = game[column];
453
- cell.style.color = "rgb(114, 114, 114)";
454
- }
455
- });
456
-
457
- for (let i = 0; i < 3; i++) {
458
- const cell = row.insertCell(-1);
459
- if (i<1) {
460
- const input = document.createElement('input');
461
- input.type = 'text';
462
- input.value = lines[index];
463
- cell.appendChild(input);
464
- }
465
- }
466
- });
467
- }
468
-
469
-
470
- function submitData() {
471
- const predictButton = document.getElementById('submitButton');
472
-
473
- const table = document.getElementById('gameTable');
474
- const rows = table.querySelectorAll('tr');
475
- const games = [];
476
-
477
- rows.forEach((row, index) => {
478
- if (index === 0) return;
479
-
480
- const winnerCell = row.cells[row.cells.length - 2];
481
- const overUnderCell = row.cells[row.cells.length - 1];
482
- const spinnerDiv = document.createElement('div');
483
- spinnerDiv.className = 'spinner';
484
- winnerCell.innerHTML = '';
485
- overUnderCell.innerHTML = '';
486
- winnerCell.appendChild(spinnerDiv);
487
- overUnderCell.appendChild(spinnerDiv.cloneNode(true));
488
-
489
- const cells = row.querySelectorAll('td');
490
- const game = {};
491
-
492
- game.Date = cells[0].textContent;
493
- game.AwayTeam = cells[1].querySelector('img').alt;
494
- game.HomeTeam = cells[2].querySelector('img').alt;
495
- game.OverUnderLine = cells[3].querySelector('input').value;
496
- game.rowIndex = index - 1;
497
- games.push(game);
498
- });
499
-
500
-
501
- fetch('/submit_games', {
502
- method: 'POST',
503
- headers: {
504
- 'Content-Type': 'application/json',
505
- },
506
- body: JSON.stringify(games),
507
- })
508
- .then(response => response.json())
509
- .then(data => {
510
- if (data.moneylines && data.over_unders) {
511
- const table = document.getElementById('gameTable');
512
- const rows = table.querySelectorAll('tr');
513
-
514
- data.moneylines.forEach((moneyline, index) => {
515
- const row = rows[parseInt(moneyline.rowIndex) + 1];
516
-
517
- const winnerCell = row.cells[row.cells.length - 2];
518
- winnerCell.removeChild(winnerCell.querySelector('.spinner'));
519
- winnerCell.innerHTML = '';
520
-
521
- const wrapperDiv = document.createElement('div');
522
- wrapperDiv.className = 'winner-wrapper';
523
- if (moneyline.Probabilities[0] > 0.6){
524
- wrapperDiv.classList.add("highlight");
525
- }
526
- else {
527
- wrapperDiv.style.opacity = "0.5";
528
- }
529
-
530
- const winnerImg = document.createElement('img');
531
- winnerImg.src = `/Static/${moneyline.Winner}.webp`;
532
- winnerImg.alt = moneyline.Winner;
533
- winnerImg.width = 50;
534
- winnerImg.className = 'winner-image hidden';
535
- wrapperDiv.appendChild(winnerImg);
536
-
537
- const winnerEmojiDiv = document.createElement('div');
538
- winnerEmojiDiv.className = 'emoji';
539
-
540
- wrapperDiv.dataset.proba = Math.floor(moneyline.Probabilities[0] * 100).toFixed(0);
541
- if (moneyline.Winner[0] === moneyline.Result) {
542
- winnerEmojiDiv.textContent = '✅';
543
- }
544
- else if (moneyline.Result === 'Tie') {
545
- winnerEmojiDiv.textContent = '🔵';
546
- }
547
- else {
548
- winnerEmojiDiv.textContent = '❌';
549
- }
550
- if (moneyline.Result === 'N/A') {
551
- winnerEmojiDiv.textContent = `(${wrapperDiv.dataset.proba}%)`;
552
- }
553
- wrapperDiv.appendChild(winnerEmojiDiv);
554
-
555
- setTimeout(() => {
556
- winnerImg.classList.remove('hidden');
557
- }, 10);
558
-
559
- winnerCell.appendChild(wrapperDiv);
560
-
561
- const overUnderCell = row.cells[row.cells.length - 1];
562
- overUnderCell.removeChild(overUnderCell.querySelector('.spinner'));
563
- overUnderCell.innerHTML = '';
564
-
565
- const overUnderDiv = document.createElement('div');
566
- overUnderDiv.className = 'over-under-wrapper hidden';
567
- if (data.over_unders[index]['Probability'][0] > 0.6){
568
- overUnderDiv.classList.add("highlight");
569
- }
570
- else {
571
- overUnderDiv.style.opacity = "0.5";
572
- }
573
-
574
- const textDiv = document.createElement('div');
575
- textDiv.className = 'over-under-text';
576
- textDiv.textContent = data.over_unders[index]['Over/Under'];
577
- if (textDiv.textContent === 'Over') {
578
- overUnderDiv.className += ' over';
579
- } else if (textDiv.textContent === 'Under') {
580
- overUnderDiv.className += ' under';
581
- } else {
582
- overUnderDiv.className += ' na';
583
- }
584
-
585
- overUnderDiv.appendChild(textDiv);
586
-
587
- const overEmojiDiv = document.createElement('div');
588
- overEmojiDiv.className = 'emoji';
589
-
590
- overUnderDiv.dataset.proba = Math.floor(data.over_unders[index]['Probability'][0] * 100).toFixed(0);
591
- if (data.over_unders[index]['Over/Under'][0] === data.over_unders[index]['Result']) {
592
- overEmojiDiv.textContent = '✅';
593
- }
594
- else if (data.over_unders[index]['Result'] === 'Push') {
595
- overEmojiDiv.textContent = '🔵';
596
- }
597
- else {
598
- overEmojiDiv.textContent = '❌';
599
- }
600
- if (data.over_unders[index]['Result'] === 'N/A') {
601
- overEmojiDiv.textContent = `(${overUnderDiv.dataset.proba}%)`;
602
- }
603
- overUnderDiv.appendChild(overEmojiDiv);
604
-
605
- setTimeout(() => {
606
- overUnderDiv.classList.remove('hidden');
607
- }, 10);
608
-
609
- overUnderCell.appendChild(overUnderDiv);
610
-
611
- showProbabilityOnHover(wrapperDiv);
612
- showProbabilityOnHover(overUnderDiv);
613
-
614
- });
615
- }
616
- });
617
-
618
- }
619
-
620
- //Hover listener
621
- function showProbabilityOnHover(div) {
622
- let previousValue;
623
- let divText = div.children[1];
624
- let eventProcessed = false;
625
-
626
- function handleEnter() {
627
- if (eventProcessed) return; // Skip if an event has already been processed
628
-
629
- eventProcessed = true;
630
-
631
- if (divText.textContent !== `(${div.dataset.proba}%)`) {
632
- divText.style.opacity = 0;
633
-
634
- setTimeout(() => {
635
- previousValue = divText.textContent;
636
- divText.textContent = `(${div.dataset.proba}%)`;
637
- divText.style.opacity = 1;
638
- }, 300);
639
-
640
- setTimeout(() => {
641
- divText.style.opacity = 0;
642
- setTimeout(() => {
643
- divText.textContent = previousValue;
644
- divText.style.opacity = 1;
645
- eventProcessed = false; // Reset the flag
646
- }, 300);
647
- }, 1000);
648
- }
649
- }
650
-
651
- // For desktop
652
- div.addEventListener('mouseenter', handleEnter);
653
- // For mobile
654
- div.addEventListener('touchstart', handleEnter);
655
- }
656
-
657
- // Populate dropdown
658
- let selectedWeek;
659
- async function populateDropdown() {
660
- const weekSelector = document.getElementById('weekSelector');
661
- weekSelector.innerHTML = "";
662
-
663
- const response = await fetch('/get_weeks');
664
- const data = await response.json();
665
-
666
- data.forEach((week, index) => {
667
- const option = document.createElement('option');
668
- option.value = week;
669
- option.text = `Week ${week}`;
670
- weekSelector.appendChild(option);
671
-
672
- if (index === 0) {
673
- selectedWeek = week;
674
- }
675
- });
676
- }
677
-
678
-
679
- // Get new games when new week selected
680
- document.getElementById('weekSelector').addEventListener('change', function(event) {
681
- selectedWeek = event.target.value;
682
- getNew();
683
- });
684
-
685
-
686
- // Initial load
687
- function loadThings() {
688
- populateDropdown()
689
- .then(() => fetchGames(selectedWeek))
690
- .then(() => submitData())
691
- .catch(error => console.error(error));
692
- }
693
-
694
- // Get new
695
- async function getNew() {
696
- const table = document.getElementById('gameTable');
697
- table.style.opacity = "0.5";
698
-
699
- try {
700
- await fetchGames(selectedWeek);
701
- await submitData();
702
- table.style.opacity = "1";
703
- } catch (error) {
704
- console.error(error);
705
- }
706
- }
707
-
708
-
709
- // Submit on click, enter, and pageload
710
- loadThings();
711
-
712
- document.getElementById('submitButton').addEventListener('click', submitData);
713
-
714
- document.addEventListener('keydown', function(event) {
715
- if (event.keyCode === 13) {
716
- submitData();
717
- }
718
- });
719
-
720
-
721
- </script>
722
-
723
- </body>
724
- </html>
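
The script above talks to four JSON endpoints: `/get_weeks`, `/get_games?week=...`, `/get_lines`, and `POST /submit_games`. The space's real `main.py` is not part of this diff, but a hypothetical Flask stub matching the response shapes the JavaScript expects (field names inferred from the template, values illustrative only) would look roughly like this:

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/get_weeks")
def get_weeks():
    # Newest week first; the dropdown selects index 0 by default.
    return jsonify([3, 2, 1])

@app.route("/get_games")
def get_games():
    week = request.args.get("week")
    # One dict per game; keys match the columns read by fetchGames().
    return jsonify([{"Date": "2023-09-24", "Away Team": "DAL", "Home Team": "ARI"}])

@app.route("/get_lines")
def get_lines():
    # One over/under line per game row, in the same order as /get_games.
    return jsonify([43.0])

@app.route("/submit_games", methods=["POST"])
def submit_games():
    games = request.get_json()
    return jsonify({
        "moneylines": [{"rowIndex": g["rowIndex"], "Winner": g["HomeTeam"],
                        "Probabilities": [0.62], "Result": "N/A"} for g in games],
        "over_unders": [{"Over/Under": "Over", "Probability": [0.55],
                         "Result": "N/A"} for g in games],
    })
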
spaces/BraydenMoore/a-random-unsecured-camera/Dockerfile DELETED
@@ -1,29 +0,0 @@
1
- # Use the official lightweight Python image.
2
- FROM python:3.11
3
-
4
- # Allow statements and log messages to immediately appear in the logs
5
- ENV PYTHONUNBUFFERED True
6
-
7
- # Copy local code to the container image.
8
- ENV APP_HOME /app
9
- WORKDIR $APP_HOME
10
- COPY . ./
11
-
12
- # Install production dependencies.
13
- RUN pip install --no-cache-dir -r requirements.txt
14
-
15
- # Create a non-root user and switch to it
16
- RUN useradd -m -u 1000 user
17
- USER user
18
- ENV HOME=/home/user \
19
- PATH=/home/user/.local/bin:$PATH
20
-
21
- # Set work directory
22
- WORKDIR $APP_HOME
23
-
24
- # Change ownership of app files to the new user
25
- COPY --chown=user . $HOME/app
26
-
27
- # Run the web service on container startup.
28
- CMD exec gunicorn --bind 0.0.0.0:7860 --workers 4 --threads 16 main:app
29
-
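
The final `CMD` expects a module named `main` exposing a WSGI callable `app`, served by gunicorn on port 7860 (the port Hugging Face Spaces proxies). A minimal placeholder for that module, assuming a Flask app (the real application in this space is more involved):

from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "ok"

if __name__ == "__main__":
    # Local development only; inside the container gunicorn serves `app` instead.
    app.run(host="0.0.0.0", port=7860)
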
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rrpn.py DELETED
@@ -1,74 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- from typing import Dict
4
- import torch
5
-
6
- from detectron2.layers import ShapeSpec
7
-
8
- from ..box_regression import Box2BoxTransformRotated
9
- from .build import PROPOSAL_GENERATOR_REGISTRY
10
- from .rpn import RPN
11
- from .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- @PROPOSAL_GENERATOR_REGISTRY.register()
17
- class RRPN(RPN):
18
- """
19
- Rotated RPN subnetwork.
20
- Please refer to https://arxiv.org/pdf/1703.01086.pdf for the original RRPN paper:
21
- Ma, J., Shao, W., Ye, H., Wang, L., Wang, H., Zheng, Y., & Xue, X. (2018).
22
- Arbitrary-oriented scene text detection via rotation proposals.
23
- IEEE Transactions on Multimedia, 20(11), 3111-3122.
24
- """
25
-
26
- def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
27
- super().__init__(cfg, input_shape)
28
- self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
29
-
30
- def forward(self, images, features, gt_instances=None):
31
- # same signature as RPN.forward
32
- gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
33
- del gt_instances
34
- features = [features[f] for f in self.in_features]
35
- pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
36
- anchors = self.anchor_generator(features)
37
-
38
- outputs = RRPNOutputs(
39
- self.box2box_transform,
40
- self.anchor_matcher,
41
- self.batch_size_per_image,
42
- self.positive_fraction,
43
- images,
44
- pred_objectness_logits,
45
- pred_anchor_deltas,
46
- anchors,
47
- self.boundary_threshold,
48
- gt_boxes,
49
- self.smooth_l1_beta,
50
- )
51
-
52
- if self.training:
53
- losses = outputs.losses()
54
- else:
55
- losses = {}
56
-
57
- with torch.no_grad():
58
- # Find the top proposals by applying NMS and removing boxes that
59
- # are too small. The proposals are treated as fixed for approximate
60
- # joint training with roi heads. This approach ignores the derivative
61
- # w.r.t. the proposal boxes’ coordinates that are also network
62
- # responses, so is approximate.
63
- proposals = find_top_rrpn_proposals(
64
- outputs.predict_proposals(),
65
- outputs.predict_objectness_logits(),
66
- images,
67
- self.nms_thresh,
68
- self.pre_nms_topk[self.training],
69
- self.post_nms_topk[self.training],
70
- self.min_box_side_len,
71
- self.training,
72
- )
73
-
74
- return proposals, losses
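
Because the class registers itself in `PROPOSAL_GENERATOR_REGISTRY`, it is normally selected through detectron2's config system rather than constructed directly. A hedged sketch of that wiring (the rotated anchor generator and the five box-regression weights are assumptions appropriate for rotated boxes, not settings taken from this repository):

from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"                   # use the class registered above
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"   # rotated anchors (assumed)
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0, 1.0)   # 5 weights: (dx, dy, dw, dh, da)
model = build_model(cfg)  # RRPN becomes the proposal generator inside the meta-architecture
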
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/extrema.h DELETED
@@ -1,568 +0,0 @@
1
- /*******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
-
30
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
31
- #include <thrust/system/cuda/config.h>
32
- #include <thrust/system/cuda/detail/reduce.h>
33
-
34
- #include <thrust/detail/cstdint.h>
35
- #include <thrust/detail/temporary_array.h>
36
- #include <thrust/extrema.h>
37
- #include <thrust/pair.h>
38
- #include <thrust/distance.h>
39
-
40
- namespace thrust
41
- {
42
- namespace cuda_cub {
43
-
44
- namespace __extrema {
45
-
46
- template <class InputType, class IndexType, class Predicate>
47
- struct arg_min_f
48
- {
49
- Predicate predicate;
50
- typedef tuple<InputType, IndexType> pair_type;
51
-
52
- __host__ __device__
53
- arg_min_f(Predicate p) : predicate(p) {}
54
-
55
- pair_type __device__
56
- operator()(pair_type const &lhs, pair_type const &rhs)
57
- {
58
- InputType const &rhs_value = get<0>(rhs);
59
- InputType const &lhs_value = get<0>(lhs);
60
- IndexType const &rhs_key = get<1>(rhs);
61
- IndexType const &lhs_key = get<1>(lhs);
62
-
63
- // check values first
64
- if (predicate(lhs_value, rhs_value))
65
- return lhs;
66
- else if (predicate(rhs_value, lhs_value))
67
- return rhs;
68
-
69
- // values are equivalent, prefer smaller index
70
- if (lhs_key < rhs_key)
71
- return lhs;
72
- else
73
- return rhs;
74
- }
75
- }; // struct arg_min_f
76
-
77
- template <class InputType, class IndexType, class Predicate>
78
- struct arg_max_f
79
- {
80
- Predicate predicate;
81
- typedef tuple<InputType, IndexType> pair_type;
82
-
83
- __host__ __device__
84
- arg_max_f(Predicate p) : predicate(p) {}
85
-
86
- pair_type __device__
87
- operator()(pair_type const &lhs, pair_type const &rhs)
88
- {
89
- InputType const &rhs_value = get<0>(rhs);
90
- InputType const &lhs_value = get<0>(lhs);
91
- IndexType const &rhs_key = get<1>(rhs);
92
- IndexType const &lhs_key = get<1>(lhs);
93
-
94
- // check values first
95
- if (predicate(lhs_value, rhs_value))
96
- return rhs;
97
- else if (predicate(rhs_value, lhs_value))
98
- return lhs;
99
-
100
- // values are equivalent, prefer smaller index
101
- if (lhs_key < rhs_key)
102
- return lhs;
103
- else
104
- return rhs;
105
- }
106
- }; // struct arg_max_f
107
-
108
- template<class InputType, class IndexType, class Predicate>
109
- struct arg_minmax_f
110
- {
111
- Predicate predicate;
112
-
113
- typedef tuple<InputType, IndexType> pair_type;
114
- typedef tuple<pair_type, pair_type> two_pairs_type;
115
-
116
- typedef arg_min_f<InputType, IndexType, Predicate> arg_min_t;
117
- typedef arg_max_f<InputType, IndexType, Predicate> arg_max_t;
118
-
119
- __host__ __device__
120
- arg_minmax_f(Predicate p) : predicate(p)
121
- {
122
- }
123
-
124
- two_pairs_type __device__
125
- operator()(two_pairs_type const &lhs, two_pairs_type const &rhs)
126
- {
127
- pair_type const &rhs_min = get<0>(rhs);
128
- pair_type const &lhs_min = get<0>(lhs);
129
- pair_type const &rhs_max = get<1>(rhs);
130
- pair_type const &lhs_max = get<1>(lhs);
131
- return thrust::make_tuple(arg_min_t(predicate)(lhs_min, rhs_min),
132
- arg_max_t(predicate)(lhs_max, rhs_max));
133
- }
134
-
135
- struct duplicate_tuple
136
- {
137
- __device__ two_pairs_type
138
- operator()(pair_type const &t)
139
- {
140
- return thrust::make_tuple(t, t);
141
- }
142
- };
143
- }; // struct arg_minmax_f
144
-
145
- template <class T,
146
- class InputIt,
147
- class OutputIt,
148
- class Size,
149
- class ReductionOp>
150
- cudaError_t THRUST_RUNTIME_FUNCTION
151
- doit_step(void * d_temp_storage,
152
- size_t & temp_storage_bytes,
153
- InputIt input_it,
154
- Size num_items,
155
- ReductionOp reduction_op,
156
- OutputIt output_it,
157
- cudaStream_t stream,
158
- bool debug_sync)
159
- {
160
- using core::AgentPlan;
161
- using core::AgentLauncher;
162
- using core::get_agent_plan;
163
- using core::cuda_optional;
164
-
165
- typedef typename detail::make_unsigned_special<Size>::type UnsignedSize;
166
-
167
- if (num_items == 0)
168
- return cudaErrorNotSupported;
169
-
170
- typedef AgentLauncher<
171
- __reduce::ReduceAgent<InputIt, OutputIt, T, Size, ReductionOp> >
172
- reduce_agent;
173
-
174
- typename reduce_agent::Plan reduce_plan = reduce_agent::get_plan(stream);
175
-
176
- cudaError_t status = cudaSuccess;
177
-
178
-
179
- if (num_items <= reduce_plan.items_per_tile)
180
- {
181
- size_t vshmem_size = core::vshmem_size(reduce_plan.shared_memory_size, 1);
182
-
183
- // small, single tile size
184
- if (d_temp_storage == NULL)
185
- {
186
- temp_storage_bytes = max<size_t>(1, vshmem_size);
187
- return status;
188
- }
189
- char *vshmem_ptr = vshmem_size > 0 ? (char*)d_temp_storage : NULL;
190
-
191
- reduce_agent ra(reduce_plan, num_items, stream, vshmem_ptr, "reduce_agent: single_tile only", debug_sync);
192
- ra.launch(input_it, output_it, num_items, reduction_op);
193
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
194
- }
195
- else
196
- {
197
- // regular size
198
- cuda_optional<int> sm_count = core::get_sm_count();
199
- CUDA_CUB_RET_IF_FAIL(sm_count.status());
200
-
201
- // reduction will not use more cta counts than requested
202
- cuda_optional<int> max_blocks_per_sm =
203
- reduce_agent::
204
- template get_max_blocks_per_sm<InputIt,
205
- OutputIt,
206
- Size,
207
- cub::GridEvenShare<Size>,
208
- cub::GridQueue<UnsignedSize>,
209
- ReductionOp>(reduce_plan);
210
- CUDA_CUB_RET_IF_FAIL(max_blocks_per_sm.status());
211
-
212
-
213
-
214
- int reduce_device_occupancy = (int)max_blocks_per_sm * sm_count;
215
-
216
- int sm_oversubscription = 5;
217
- int max_blocks = reduce_device_occupancy * sm_oversubscription;
218
-
219
- cub::GridEvenShare<Size> even_share;
220
- even_share.DispatchInit(num_items, max_blocks,
221
- reduce_plan.items_per_tile);
222
-
223
- // we will launch at most "max_blocks" blocks in a grid
224
- // so preallocate virtual shared memory storage for this if required
225
- //
226
- size_t vshmem_size = core::vshmem_size(reduce_plan.shared_memory_size,
227
- max_blocks);
228
-
229
- // Temporary storage allocation requirements
230
- void * allocations[3] = {NULL, NULL, NULL};
231
- size_t allocation_sizes[3] =
232
- {
233
- max_blocks * sizeof(T), // bytes needed for privatized block reductions
234
- cub::GridQueue<UnsignedSize>::AllocationSize(), // bytes needed for grid queue descriptor0
235
- vshmem_size // size of virtualized shared memory storage
236
- };
237
- status = cub::AliasTemporaries(d_temp_storage,
238
- temp_storage_bytes,
239
- allocations,
240
- allocation_sizes);
241
- CUDA_CUB_RET_IF_FAIL(status);
242
- if (d_temp_storage == NULL)
243
- {
244
- return status;
245
- }
246
-
247
- T *d_block_reductions = (T*) allocations[0];
248
- cub::GridQueue<UnsignedSize> queue(allocations[1]);
249
- char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[2] : NULL;
250
-
251
-
252
- // Get grid size for device_reduce_sweep_kernel
253
- int reduce_grid_size = 0;
254
- if (reduce_plan.grid_mapping == cub::GRID_MAPPING_RAKE)
255
- {
256
- // Work is distributed evenly
257
- reduce_grid_size = even_share.grid_size;
258
- }
259
- else if (reduce_plan.grid_mapping == cub::GRID_MAPPING_DYNAMIC)
260
- {
261
- // Work is distributed dynamically
262
- size_t num_tiles = (num_items + reduce_plan.items_per_tile - 1) /
263
- reduce_plan.items_per_tile;
264
-
265
- // if not enough to fill the device with threadblocks
266
- // then fill the device with threadblocks
267
- reduce_grid_size = static_cast<int>(min(num_tiles, static_cast<size_t>(reduce_device_occupancy)));
268
-
269
- typedef AgentLauncher<__reduce::DrainAgent<Size> > drain_agent;
270
- AgentPlan drain_plan = drain_agent::get_plan();
271
- drain_plan.grid_size = 1;
272
- drain_agent da(drain_plan, stream, "__reduce::drain_agent", debug_sync);
273
- da.launch(queue, num_items);
274
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
275
- }
276
- else
277
- {
278
- CUDA_CUB_RET_IF_FAIL(cudaErrorNotSupported);
279
- }
280
-
281
- reduce_plan.grid_size = reduce_grid_size;
282
- reduce_agent ra(reduce_plan, stream, vshmem_ptr, "reduce_agent: regular size reduce", debug_sync);
283
- ra.launch(input_it,
284
- d_block_reductions,
285
- num_items,
286
- even_share,
287
- queue,
288
- reduction_op);
289
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
290
-
291
-
292
- typedef AgentLauncher<
293
- __reduce::ReduceAgent<T*, OutputIt, T, Size, ReductionOp> >
294
- reduce_agent_single;
295
-
296
- reduce_plan.grid_size = 1;
297
- reduce_agent_single ra1(reduce_plan, stream, vshmem_ptr, "reduce_agent: single tile reduce", debug_sync);
298
-
299
- ra1.launch(d_block_reductions, output_it, reduce_grid_size, reduction_op);
300
- CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
301
- }
302
-
303
- return status;
304
- } // func doit_step
305
-
306
- // this is an init-less reduce, needed for min/max-element functionality
307
- // this will avoid copying the first value from device->host
308
- template <typename Derived,
309
- typename InputIt,
310
- typename Size,
311
- typename BinaryOp,
312
- typename T>
313
- THRUST_RUNTIME_FUNCTION
314
- T extrema(execution_policy<Derived>& policy,
315
- InputIt first,
316
- Size num_items,
317
- BinaryOp binary_op,
318
- T*)
319
- {
320
- size_t temp_storage_bytes = 0;
321
- cudaStream_t stream = cuda_cub::stream(policy);
322
- bool debug_sync = THRUST_DEBUG_SYNC_FLAG;
323
-
324
- cudaError_t status;
325
- THRUST_INDEX_TYPE_DISPATCH(status, doit_step<T>, num_items,
326
- (NULL, temp_storage_bytes, first, num_items_fixed,
327
- binary_op, reinterpret_cast<T*>(NULL), stream,
328
- debug_sync));
329
- cuda_cub::throw_on_error(status, "extrema failed on 1st step");
330
-
331
- size_t allocation_sizes[2] = {sizeof(T*), temp_storage_bytes};
332
- void * allocations[2] = {NULL, NULL};
333
-
334
- size_t storage_size = 0;
335
- status = core::alias_storage(NULL,
336
- storage_size,
337
- allocations,
338
- allocation_sizes);
339
- cuda_cub::throw_on_error(status, "extrema failed on 1st alias storage");
340
-
341
- // Allocate temporary storage.
342
- thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
343
- tmp(policy, storage_size);
344
- void *ptr = static_cast<void*>(tmp.data().get());
345
-
346
- status = core::alias_storage(ptr,
347
- storage_size,
348
- allocations,
349
- allocation_sizes);
350
- cuda_cub::throw_on_error(status, "extrema failed on 2nd alias storage");
351
-
352
- T* d_result = thrust::detail::aligned_reinterpret_cast<T*>(allocations[0]);
353
-
354
- THRUST_INDEX_TYPE_DISPATCH(status, doit_step<T>, num_items,
355
- (allocations[1], temp_storage_bytes, first,
356
- num_items_fixed, binary_op, d_result, stream,
357
- debug_sync));
358
- cuda_cub::throw_on_error(status, "extrema failed on 2nd step");
359
-
360
- status = cuda_cub::synchronize(policy);
361
- cuda_cub::throw_on_error(status, "extrema failed to synchronize");
362
-
363
- T result = cuda_cub::get_value(policy, d_result);
364
-
365
- return result;
366
- }
367
-
368
- template <template <class, class, class> class ArgFunctor,
369
- class Derived,
370
- class ItemsIt,
371
- class BinaryPred>
372
- ItemsIt THRUST_RUNTIME_FUNCTION
373
- element(execution_policy<Derived> &policy,
374
- ItemsIt first,
375
- ItemsIt last,
376
- BinaryPred binary_pred)
377
- {
378
- if (first == last)
379
- return last;
380
-
381
- typedef typename iterator_traits<ItemsIt>::value_type InputType;
382
- typedef typename iterator_traits<ItemsIt>::difference_type IndexType;
383
-
384
- IndexType num_items = static_cast<IndexType>(thrust::distance(first, last));
385
-
386
- typedef tuple<ItemsIt, counting_iterator_t<IndexType> > iterator_tuple;
387
- typedef zip_iterator<iterator_tuple> zip_iterator;
388
-
389
- iterator_tuple iter_tuple = thrust::make_tuple(first, counting_iterator_t<IndexType>(0));
390
-
391
-
392
- typedef ArgFunctor<InputType, IndexType, BinaryPred> arg_min_t;
393
- typedef tuple<InputType, IndexType> T;
394
-
395
- zip_iterator begin = make_zip_iterator(iter_tuple);
396
-
397
- T result = extrema(policy,
398
- begin,
399
- num_items,
400
- arg_min_t(binary_pred),
401
- (T *)(NULL));
402
- return first + thrust::get<1>(result);
403
- }
404
-
405
-
406
- } // namespace __extrema
407
-
408
- /// min element
409
-
410
- __thrust_exec_check_disable__
411
- template <class Derived,
412
-           class ItemsIt,
-           class BinaryPred>
- ItemsIt __host__ __device__
- min_element(execution_policy<Derived> &policy,
-             ItemsIt    first,
-             ItemsIt    last,
-             BinaryPred binary_pred)
- {
-   ItemsIt ret = first;
-   if (__THRUST_HAS_CUDART__)
-   {
-     ret = __extrema::element<__extrema::arg_min_f>(policy,
-                                                    first,
-                                                    last,
-                                                    binary_pred);
-   }
-   else
-   {
- #if !__THRUST_HAS_CUDART__
-     ret = thrust::min_element(cvt_to_seq(derived_cast(policy)),
-                               first,
-                               last,
-                               binary_pred);
- #endif
-   }
-   return ret;
- }
-
- template <class Derived,
-           class ItemsIt>
- ItemsIt __host__ __device__
- min_element(execution_policy<Derived> &policy,
-             ItemsIt first,
-             ItemsIt last)
- {
-   typedef typename iterator_value<ItemsIt>::type value_type;
-   return cuda_cub::min_element(policy, first, last, less<value_type>());
- }
-
- /// max element
-
- __thrust_exec_check_disable__
- template <class Derived,
-           class ItemsIt,
-           class BinaryPred>
- ItemsIt __host__ __device__
- max_element(execution_policy<Derived> &policy,
-             ItemsIt    first,
-             ItemsIt    last,
-             BinaryPred binary_pred)
- {
-   ItemsIt ret = first;
-   if (__THRUST_HAS_CUDART__)
-   {
-     ret = __extrema::element<__extrema::arg_max_f>(policy,
-                                                    first,
-                                                    last,
-                                                    binary_pred);
-   }
-   else
-   {
- #if !__THRUST_HAS_CUDART__
-     ret = thrust::max_element(cvt_to_seq(derived_cast(policy)),
-                               first,
-                               last,
-                               binary_pred);
- #endif
-   }
-   return ret;
- }
-
- template <class Derived,
-           class ItemsIt>
- ItemsIt __host__ __device__
- max_element(execution_policy<Derived> &policy,
-             ItemsIt first,
-             ItemsIt last)
- {
-   typedef typename iterator_value<ItemsIt>::type value_type;
-   return cuda_cub::max_element(policy, first, last, less<value_type>());
- }
-
- /// minmax element
-
- __thrust_exec_check_disable__
- template <class Derived,
-           class ItemsIt,
-           class BinaryPred>
- pair<ItemsIt, ItemsIt> __host__ __device__
- minmax_element(execution_policy<Derived> &policy,
-                ItemsIt    first,
-                ItemsIt    last,
-                BinaryPred binary_pred)
- {
-   pair<ItemsIt, ItemsIt> ret = thrust::make_pair(first, first);
-
-   if (__THRUST_HAS_CUDART__)
-   {
-     if (first == last)
-       return thrust::make_pair(last, last);
-
-     typedef typename iterator_traits<ItemsIt>::value_type      InputType;
-     typedef typename iterator_traits<ItemsIt>::difference_type IndexType;
-
-     IndexType num_items = static_cast<IndexType>(thrust::distance(first, last));
-
-
-     typedef tuple<ItemsIt, counting_iterator_t<IndexType> > iterator_tuple;
-     typedef zip_iterator<iterator_tuple> zip_iterator;
-
-     iterator_tuple iter_tuple = thrust::make_tuple(first, counting_iterator_t<IndexType>(0));
-
-
-     typedef __extrema::arg_minmax_f<InputType, IndexType, BinaryPred> arg_minmax_t;
-     typedef typename arg_minmax_t::two_pairs_type  two_pairs_type;
-     typedef typename arg_minmax_t::duplicate_tuple duplicate_t;
-     typedef transform_input_iterator_t<two_pairs_type,
-                                        zip_iterator,
-                                        duplicate_t>
-         transform_t;
-
-     zip_iterator   begin  = make_zip_iterator(iter_tuple);
-     two_pairs_type result = __extrema::extrema(policy,
-                                                transform_t(begin, duplicate_t()),
-                                                num_items,
-                                                arg_minmax_t(binary_pred),
-                                                (two_pairs_type *)(NULL));
-     ret = thrust::make_pair(first + get<1>(get<0>(result)),
-                             first + get<1>(get<1>(result)));
-   }
-   else
-   {
- #if !__THRUST_HAS_CUDART__
-     ret = thrust::minmax_element(cvt_to_seq(derived_cast(policy)),
-                                  first,
-                                  last,
-                                  binary_pred);
- #endif
-   }
-   return ret;
- }
-
- template <class Derived,
-           class ItemsIt>
- pair<ItemsIt, ItemsIt> __host__ __device__
- minmax_element(execution_policy<Derived> &policy,
-                ItemsIt first,
-                ItemsIt last)
- {
-   typedef typename iterator_value<ItemsIt>::type value_type;
-   return cuda_cub::minmax_element(policy, first, last, less<value_type>());
- }
-
-
- } // namespace cuda_cub
- } // end namespace thrust
- #endif
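Note: the deleted overloads above are the CUDA backend behind the public `thrust::min_element`, `thrust::max_element` and `thrust::minmax_element` algorithms. For context, here is a minimal sketch of calling those public entry points; the headers pulled in and the use of `thrust::device_vector` are illustrative assumptions, not part of the deleted file.

```cpp
#include <thrust/device_vector.h>
#include <thrust/extrema.h>   // thrust::min_element / max_element / minmax_element
#include <thrust/pair.h>
#include <cstdio>

int main()
{
  thrust::device_vector<int> d(5);
  d[0] = 3; d[1] = -1; d[2] = 7; d[3] = 0; d[4] = 7;

  // Default comparison is operator<; a custom BinaryPred overload also exists.
  thrust::device_vector<int>::iterator lo = thrust::min_element(d.begin(), d.end());

  // Returns a pair: .first points at the smallest element, .second at the largest.
  thrust::pair<thrust::device_vector<int>::iterator,
               thrust::device_vector<int>::iterator> mm =
      thrust::minmax_element(d.begin(), d.end());

  std::printf("min=%d max=%d\n", static_cast<int>(*lo), static_cast<int>(*mm.second));
  return 0;
}
```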
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/error.h DELETED
@@ -1,183 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- /*! \file thrust/system/cuda/error.h
-  *  \brief CUDA-specific error reporting
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/type_traits.h>
- #include <thrust/system/error_code.h>
- #include <thrust/system/cuda/detail/guarded_driver_types.h>
-
- namespace thrust
- {
-
- namespace system
- {
-
- namespace cuda
- {
-
- // To construct an error_code after a CUDA Runtime error:
- //
- //   error_code(::cudaGetLastError(), cuda_category())
-
- // XXX N3000 prefers enum class errc { ... }
- /*! Namespace for CUDA Runtime errors.
-  */
- namespace errc
- {
-
- /*! \p errc_t enumerates the kinds of CUDA Runtime errors.
-  */
- enum errc_t
- {
-   // from cuda/include/driver_types.h
-   // mirror their order
-   success                            = cudaSuccess,
-   missing_configuration              = cudaErrorMissingConfiguration,
-   memory_allocation                  = cudaErrorMemoryAllocation,
-   initialization_error               = cudaErrorInitializationError,
-   launch_failure                     = cudaErrorLaunchFailure,
-   prior_launch_failure               = cudaErrorPriorLaunchFailure,
-   launch_timeout                     = cudaErrorLaunchTimeout,
-   launch_out_of_resources            = cudaErrorLaunchOutOfResources,
-   invalid_device_function            = cudaErrorInvalidDeviceFunction,
-   invalid_configuration              = cudaErrorInvalidConfiguration,
-   invalid_device                     = cudaErrorInvalidDevice,
-   invalid_value                      = cudaErrorInvalidValue,
-   invalid_pitch_value                = cudaErrorInvalidPitchValue,
-   invalid_symbol                     = cudaErrorInvalidSymbol,
-   map_buffer_object_failed           = cudaErrorMapBufferObjectFailed,
-   unmap_buffer_object_failed         = cudaErrorUnmapBufferObjectFailed,
-   invalid_host_pointer               = cudaErrorInvalidHostPointer,
-   invalid_device_pointer             = cudaErrorInvalidDevicePointer,
-   invalid_texture                    = cudaErrorInvalidTexture,
-   invalid_texture_binding            = cudaErrorInvalidTextureBinding,
-   invalid_channel_descriptor         = cudaErrorInvalidChannelDescriptor,
-   invalid_memcpy_direction           = cudaErrorInvalidMemcpyDirection,
-   address_of_constant_error          = cudaErrorAddressOfConstant,
-   texture_fetch_failed               = cudaErrorTextureFetchFailed,
-   texture_not_bound                  = cudaErrorTextureNotBound,
-   synchronization_error              = cudaErrorSynchronizationError,
-   invalid_filter_setting             = cudaErrorInvalidFilterSetting,
-   invalid_norm_setting               = cudaErrorInvalidNormSetting,
-   mixed_device_execution             = cudaErrorMixedDeviceExecution,
-   cuda_runtime_unloading             = cudaErrorCudartUnloading,
-   unknown                            = cudaErrorUnknown,
-   not_yet_implemented                = cudaErrorNotYetImplemented,
-   memory_value_too_large             = cudaErrorMemoryValueTooLarge,
-   invalid_resource_handle            = cudaErrorInvalidResourceHandle,
-   not_ready                          = cudaErrorNotReady,
-   insufficient_driver                = cudaErrorInsufficientDriver,
-   set_on_active_process_error        = cudaErrorSetOnActiveProcess,
-   no_device                          = cudaErrorNoDevice,
-   ecc_uncorrectable                  = cudaErrorECCUncorrectable,
-
- #if CUDART_VERSION >= 4020
-   shared_object_symbol_not_found     = cudaErrorSharedObjectSymbolNotFound,
-   shared_object_init_failed          = cudaErrorSharedObjectInitFailed,
-   unsupported_limit                  = cudaErrorUnsupportedLimit,
-   duplicate_variable_name            = cudaErrorDuplicateVariableName,
-   duplicate_texture_name             = cudaErrorDuplicateTextureName,
-   duplicate_surface_name             = cudaErrorDuplicateSurfaceName,
-   devices_unavailable                = cudaErrorDevicesUnavailable,
-   invalid_kernel_image               = cudaErrorInvalidKernelImage,
-   no_kernel_image_for_device         = cudaErrorNoKernelImageForDevice,
-   incompatible_driver_context        = cudaErrorIncompatibleDriverContext,
-   peer_access_already_enabled        = cudaErrorPeerAccessAlreadyEnabled,
-   peer_access_not_enabled            = cudaErrorPeerAccessNotEnabled,
-   device_already_in_use              = cudaErrorDeviceAlreadyInUse,
-   profiler_disabled                  = cudaErrorProfilerDisabled,
-   assert_triggered                   = cudaErrorAssert,
-   too_many_peers                     = cudaErrorTooManyPeers,
-   host_memory_already_registered     = cudaErrorHostMemoryAlreadyRegistered,
-   host_memory_not_registered         = cudaErrorHostMemoryNotRegistered,
-   operating_system_error             = cudaErrorOperatingSystem,
- #endif
-
- #if CUDART_VERSION >= 5000
-   peer_access_unsupported            = cudaErrorPeerAccessUnsupported,
-   launch_max_depth_exceeded          = cudaErrorLaunchMaxDepthExceeded,
-   launch_file_scoped_texture_used    = cudaErrorLaunchFileScopedTex,
-   launch_file_scoped_surface_used    = cudaErrorLaunchFileScopedSurf,
-   sync_depth_exceeded                = cudaErrorSyncDepthExceeded,
-   attempted_operation_not_permitted  = cudaErrorNotPermitted,
-   attempted_operation_not_supported  = cudaErrorNotSupported,
- #endif
-
-   startup_failure                    = cudaErrorStartupFailure
- }; // end errc_t
-
-
- } // end namespace errc
-
- } // end namespace cuda
-
- /*! \return A reference to an object of a type derived from class \p thrust::error_category.
-  *  \note The object's \p equivalent virtual functions shall behave as specified
-  *        for the class \p thrust::error_category. The object's \p name virtual function shall
-  *        return a pointer to the string <tt>"cuda"</tt>. The object's
-  *        \p default_error_condition virtual function shall behave as follows:
-  *
-  *        If the argument <tt>ev</tt> corresponds to a CUDA error value, the function
-  *        shall return <tt>error_condition(ev,cuda_category())</tt>.
-  *        Otherwise, the function shall return <tt>system_category.default_error_condition(ev)</tt>.
-  */
- inline const error_category &cuda_category(void);
-
-
- // XXX N3000 prefers is_error_code_enum<cuda::errc>
-
- /*! Specialization of \p is_error_code_enum for \p cuda::errc::errc_t
-  */
- template<> struct is_error_code_enum<cuda::errc::errc_t> : thrust::detail::true_type {};
-
-
- // XXX replace cuda::errc::errc_t with cuda::errc upon c++0x
- /*! \return <tt>error_code(static_cast<int>(e), cuda::error_category())</tt>
-  */
- inline error_code make_error_code(cuda::errc::errc_t e);
-
-
- // XXX replace cuda::errc::errc_t with cuda::errc upon c++0x
- /*! \return <tt>error_condition(static_cast<int>(e), cuda::error_category())</tt>.
-  */
- inline error_condition make_error_condition(cuda::errc::errc_t e);
-
-
- } // end system
-
- namespace cuda_cub
- {
- namespace errc = system::cuda::errc;
- } // end cuda_cub
-
- namespace cuda
- {
- // XXX replace with using system::cuda_errc upon c++0x
- namespace errc = system::cuda::errc;
- } // end cuda
-
- using system::cuda_category;
-
- } // end namespace thrust
-
- #include <thrust/system/cuda/detail/error.inl>
-
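Note: the deleted header only declares `cuda_category()` and the `errc` enum. A minimal, hedged sketch of the usage pattern its comments describe (turning the last CUDA Runtime status into a `thrust::system::error_code`) is shown below; the choice of `cudaDeviceSynchronize()` as the call being checked is an assumption for illustration.

```cpp
#include <thrust/system/cuda/error.h>
#include <thrust/system/error_code.h>
#include <cuda_runtime_api.h>
#include <iostream>

int main()
{
  // Any runtime call (or cudaGetLastError()) yields a cudaError_t status.
  cudaError_t status = cudaDeviceSynchronize();

  // Wrap it in an error_code tagged with the CUDA category declared above.
  thrust::system::error_code ec(status, thrust::system::cuda_category());

  if (ec)  // non-zero value: an error was recorded
    std::cerr << "CUDA error: " << ec.message() << std::endl;
  else
    std::cout << "success (" << ec.value() << ")" << std::endl;
  return 0;
}
```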
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/scan_by_key.h DELETED
@@ -1,44 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // the purpose of this header is to #include the scan_by_key.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch scan_by_key
-
- #include <thrust/system/detail/sequential/scan_by_key.h>
-
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/scan_by_key.h>
- #include <thrust/system/cuda/detail/scan_by_key.h>
- #include <thrust/system/omp/detail/scan_by_key.h>
- #include <thrust/system/tbb/detail/scan_by_key.h>
- #endif
-
- #define __THRUST_HOST_SYSTEM_SCAN_BY_KEY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/scan_by_key.h>
- #include __THRUST_HOST_SYSTEM_SCAN_BY_KEY_HEADER
- #undef __THRUST_HOST_SYSTEM_SCAN_BY_KEY_HEADER
-
- #define __THRUST_DEVICE_SYSTEM_SCAN_BY_KEY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/scan_by_key.h>
- #include __THRUST_DEVICE_SYSTEM_SCAN_BY_KEY_HEADER
- #undef __THRUST_DEVICE_SYSTEM_SCAN_BY_KEY_HEADER
-
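Note: the deleted header is only ADL dispatch plumbing; the calls it routes are the `*_scan_by_key` algorithms. A small sketch of one such call follows, with the key/value contents chosen purely for illustration.

```cpp
#include <thrust/scan.h>            // thrust::inclusive_scan_by_key
#include <thrust/device_vector.h>

int main()
{
  int keys_h[] = {0, 0, 0, 1, 1, 2};
  int vals_h[] = {1, 1, 1, 1, 1, 1};
  thrust::device_vector<int> keys(keys_h, keys_h + 6);
  thrust::device_vector<int> vals(vals_h, vals_h + 6);
  thrust::device_vector<int> out(6);

  // Running sums restart whenever the key changes: out = 1 2 3 1 2 1
  thrust::inclusive_scan_by_key(keys.begin(), keys.end(), vals.begin(), out.begin());
  return 0;
}
```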
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/tabulate.h DELETED
@@ -1,49 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/system/detail/generic/tag.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace detail
- {
- namespace generic
- {
-
-
- template<typename DerivedPolicy,
-          typename ForwardIterator,
-          typename UnaryOperation>
- __host__ __device__
-   void tabulate(thrust::execution_policy<DerivedPolicy> &exec,
-                 ForwardIterator first,
-                 ForwardIterator last,
-                 UnaryOperation unary_op);
-
-
- } // end namespace generic
- } // end namespace detail
- } // end namespace system
- } // end namespace thrust
-
- #include <thrust/system/detail/generic/tabulate.inl>
-
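Note: the header above only forward-declares the generic `tabulate`. For context, a minimal sketch of the public `thrust::tabulate` call it backs is shown below; the functor and vector size are illustrative assumptions.

```cpp
#include <thrust/tabulate.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>

int main()
{
  thrust::device_vector<int> v(5);

  // tabulate assigns unary_op(i) to the i-th element; with negate this
  // produces 0, -1, -2, -3, -4.
  thrust::tabulate(v.begin(), v.end(), thrust::negate<int>());
  return 0;
}
```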
 
spaces/CVPR/WALT/mmdet/core/anchor/anchor_generator.py DELETED
@@ -1,727 +0,0 @@
1
- import mmcv
2
- import numpy as np
3
- import torch
4
- from torch.nn.modules.utils import _pair
5
-
6
- from .builder import ANCHOR_GENERATORS
7
-
8
-
9
- @ANCHOR_GENERATORS.register_module()
10
- class AnchorGenerator(object):
11
- """Standard anchor generator for 2D anchor-based detectors.
12
-
13
- Args:
14
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
15
- in multiple feature levels in order (w, h).
16
- ratios (list[float]): The list of ratios between the height and width
17
- of anchors in a single level.
18
- scales (list[int] | None): Anchor scales for anchors in a single level.
19
- It cannot be set at the same time if `octave_base_scale` and
20
- `scales_per_octave` are set.
21
- base_sizes (list[int] | None): The basic sizes
22
- of anchors in multiple levels.
23
- If None is given, strides will be used as base_sizes.
24
- (If strides are non square, the shortest stride is taken.)
25
- scale_major (bool): Whether to multiply scales first when generating
26
- base anchors. If true, the anchors in the same row will have the
27
- same scales. By default it is True in V2.0
28
- octave_base_scale (int): The base scale of octave.
29
- scales_per_octave (int): Number of scales for each octave.
30
- `octave_base_scale` and `scales_per_octave` are usually used in
31
- retinanet and the `scales` should be None when they are set.
32
- centers (list[tuple[float, float]] | None): The centers of the anchor
33
- relative to the feature grid center in multiple feature levels.
34
- By default it is set to be None and not used. If a list of tuple of
35
- float is given, they will be used to shift the centers of anchors.
36
- center_offset (float): The offset of center in proportion to anchors'
37
- width and height. By default it is 0 in V2.0.
38
-
39
- Examples:
40
- >>> from mmdet.core import AnchorGenerator
41
- >>> self = AnchorGenerator([16], [1.], [1.], [9])
42
- >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu')
43
- >>> print(all_anchors)
44
- [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
45
- [11.5000, -4.5000, 20.5000, 4.5000],
46
- [-4.5000, 11.5000, 4.5000, 20.5000],
47
- [11.5000, 11.5000, 20.5000, 20.5000]])]
48
- >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
49
- >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu')
50
- >>> print(all_anchors)
51
- [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
52
- [11.5000, -4.5000, 20.5000, 4.5000],
53
- [-4.5000, 11.5000, 4.5000, 20.5000],
54
- [11.5000, 11.5000, 20.5000, 20.5000]]), \
55
- tensor([[-9., -9., 9., 9.]])]
56
- """
57
-
58
- def __init__(self,
59
- strides,
60
- ratios,
61
- scales=None,
62
- base_sizes=None,
63
- scale_major=True,
64
- octave_base_scale=None,
65
- scales_per_octave=None,
66
- centers=None,
67
- center_offset=0.):
68
- # check center and center_offset
69
- if center_offset != 0:
70
- assert centers is None, 'center cannot be set when center_offset' \
71
- f'!=0, {centers} is given.'
72
- if not (0 <= center_offset <= 1):
73
- raise ValueError('center_offset should be in range [0, 1], '
74
- f'{center_offset} is given.')
75
- if centers is not None:
76
- assert len(centers) == len(strides), \
77
- 'The number of strides should be the same as centers, got ' \
78
- f'{strides} and {centers}'
79
-
80
- # calculate base sizes of anchors
81
- self.strides = [_pair(stride) for stride in strides]
82
- self.base_sizes = [min(stride) for stride in self.strides
83
- ] if base_sizes is None else base_sizes
84
- assert len(self.base_sizes) == len(self.strides), \
85
- 'The number of strides should be the same as base sizes, got ' \
86
- f'{self.strides} and {self.base_sizes}'
87
-
88
- # calculate scales of anchors
89
- assert ((octave_base_scale is not None
90
- and scales_per_octave is not None) ^ (scales is not None)), \
91
- 'scales and octave_base_scale with scales_per_octave cannot' \
92
- ' be set at the same time'
93
- if scales is not None:
94
- self.scales = torch.Tensor(scales)
95
- elif octave_base_scale is not None and scales_per_octave is not None:
96
- octave_scales = np.array(
97
- [2**(i / scales_per_octave) for i in range(scales_per_octave)])
98
- scales = octave_scales * octave_base_scale
99
- self.scales = torch.Tensor(scales)
100
- else:
101
- raise ValueError('Either scales or octave_base_scale with '
102
- 'scales_per_octave should be set')
103
-
104
- self.octave_base_scale = octave_base_scale
105
- self.scales_per_octave = scales_per_octave
106
- self.ratios = torch.Tensor(ratios)
107
- self.scale_major = scale_major
108
- self.centers = centers
109
- self.center_offset = center_offset
110
- self.base_anchors = self.gen_base_anchors()
111
-
112
- @property
113
- def num_base_anchors(self):
114
- """list[int]: total number of base anchors in a feature grid"""
115
- return [base_anchors.size(0) for base_anchors in self.base_anchors]
116
-
117
- @property
118
- def num_levels(self):
119
- """int: number of feature levels that the generator will be applied"""
120
- return len(self.strides)
121
-
122
- def gen_base_anchors(self):
123
- """Generate base anchors.
124
-
125
- Returns:
126
- list(torch.Tensor): Base anchors of a feature grid in multiple \
127
- feature levels.
128
- """
129
- multi_level_base_anchors = []
130
- for i, base_size in enumerate(self.base_sizes):
131
- center = None
132
- if self.centers is not None:
133
- center = self.centers[i]
134
- multi_level_base_anchors.append(
135
- self.gen_single_level_base_anchors(
136
- base_size,
137
- scales=self.scales,
138
- ratios=self.ratios,
139
- center=center))
140
- return multi_level_base_anchors
141
-
142
- def gen_single_level_base_anchors(self,
143
- base_size,
144
- scales,
145
- ratios,
146
- center=None):
147
- """Generate base anchors of a single level.
148
-
149
- Args:
150
- base_size (int | float): Basic size of an anchor.
151
- scales (torch.Tensor): Scales of the anchor.
152
- ratios (torch.Tensor): The ratio between the height
153
- and width of anchors in a single level.
154
- center (tuple[float], optional): The center of the base anchor
155
- related to a single feature grid. Defaults to None.
156
-
157
- Returns:
158
- torch.Tensor: Anchors in a single-level feature maps.
159
- """
160
- w = base_size
161
- h = base_size
162
- if center is None:
163
- x_center = self.center_offset * w
164
- y_center = self.center_offset * h
165
- else:
166
- x_center, y_center = center
167
-
168
- h_ratios = torch.sqrt(ratios)
169
- w_ratios = 1 / h_ratios
170
- if self.scale_major:
171
- ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
172
- hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
173
- else:
174
- ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
175
- hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
176
-
177
- # use float anchor and the anchor's center is aligned with the
178
- # pixel center
179
- base_anchors = [
180
- x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
181
- y_center + 0.5 * hs
182
- ]
183
- base_anchors = torch.stack(base_anchors, dim=-1)
184
-
185
- return base_anchors
186
-
187
- def _meshgrid(self, x, y, row_major=True):
188
- """Generate mesh grid of x and y.
189
-
190
- Args:
191
- x (torch.Tensor): Grids of x dimension.
192
- y (torch.Tensor): Grids of y dimension.
193
- row_major (bool, optional): Whether to return y grids first.
194
- Defaults to True.
195
-
196
- Returns:
197
- tuple[torch.Tensor]: The mesh grids of x and y.
198
- """
199
- # use shape instead of len to keep tracing while exporting to onnx
200
- xx = x.repeat(y.shape[0])
201
- yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
202
- if row_major:
203
- return xx, yy
204
- else:
205
- return yy, xx
206
-
207
- def grid_anchors(self, featmap_sizes, device='cuda'):
208
- """Generate grid anchors in multiple feature levels.
209
-
210
- Args:
211
- featmap_sizes (list[tuple]): List of feature map sizes in
212
- multiple feature levels.
213
- device (str): Device where the anchors will be put on.
214
-
215
- Return:
216
- list[torch.Tensor]: Anchors in multiple feature levels. \
217
- The sizes of each tensor should be [N, 4], where \
218
- N = width * height * num_base_anchors, width and height \
219
- are the sizes of the corresponding feature level, \
220
- num_base_anchors is the number of anchors for that level.
221
- """
222
- assert self.num_levels == len(featmap_sizes)
223
- multi_level_anchors = []
224
- for i in range(self.num_levels):
225
- anchors = self.single_level_grid_anchors(
226
- self.base_anchors[i].to(device),
227
- featmap_sizes[i],
228
- self.strides[i],
229
- device=device)
230
- multi_level_anchors.append(anchors)
231
- return multi_level_anchors
232
-
233
- def single_level_grid_anchors(self,
234
- base_anchors,
235
- featmap_size,
236
- stride=(16, 16),
237
- device='cuda'):
238
- """Generate grid anchors of a single level.
239
-
240
- Note:
241
- This function is usually called by method ``self.grid_anchors``.
242
-
243
- Args:
244
- base_anchors (torch.Tensor): The base anchors of a feature grid.
245
- featmap_size (tuple[int]): Size of the feature maps.
246
- stride (tuple[int], optional): Stride of the feature map in order
247
- (w, h). Defaults to (16, 16).
248
- device (str, optional): Device the tensor will be put on.
249
- Defaults to 'cuda'.
250
-
251
- Returns:
252
- torch.Tensor: Anchors in the overall feature maps.
253
- """
254
- # keep as Tensor, so that we can covert to ONNX correctly
255
- feat_h, feat_w = featmap_size
256
- shift_x = torch.arange(0, feat_w, device=device) * stride[0]
257
- shift_y = torch.arange(0, feat_h, device=device) * stride[1]
258
-
259
- shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
260
- shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
261
- shifts = shifts.type_as(base_anchors)
262
- # first feat_w elements correspond to the first row of shifts
263
- # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
264
- # shifted anchors (K, A, 4), reshape to (K*A, 4)
265
-
266
- all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
267
- all_anchors = all_anchors.view(-1, 4)
268
- # first A rows correspond to A anchors of (0, 0) in feature map,
269
- # then (0, 1), (0, 2), ...
270
- return all_anchors
271
-
272
- def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
273
- """Generate valid flags of anchors in multiple feature levels.
274
-
275
- Args:
276
- featmap_sizes (list(tuple)): List of feature map sizes in
277
- multiple feature levels.
278
- pad_shape (tuple): The padded shape of the image.
279
- device (str): Device where the anchors will be put on.
280
-
281
- Return:
282
- list(torch.Tensor): Valid flags of anchors in multiple levels.
283
- """
284
- assert self.num_levels == len(featmap_sizes)
285
- multi_level_flags = []
286
- for i in range(self.num_levels):
287
- anchor_stride = self.strides[i]
288
- feat_h, feat_w = featmap_sizes[i]
289
- h, w = pad_shape[:2]
290
- valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
291
- valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
292
- flags = self.single_level_valid_flags((feat_h, feat_w),
293
- (valid_feat_h, valid_feat_w),
294
- self.num_base_anchors[i],
295
- device=device)
296
- multi_level_flags.append(flags)
297
- return multi_level_flags
298
-
299
- def single_level_valid_flags(self,
300
- featmap_size,
301
- valid_size,
302
- num_base_anchors,
303
- device='cuda'):
304
- """Generate the valid flags of anchor in a single feature map.
305
-
306
- Args:
307
- featmap_size (tuple[int]): The size of feature maps.
308
- valid_size (tuple[int]): The valid size of the feature maps.
309
- num_base_anchors (int): The number of base anchors.
310
- device (str, optional): Device where the flags will be put on.
311
- Defaults to 'cuda'.
312
-
313
- Returns:
314
- torch.Tensor: The valid flags of each anchor in a single level \
315
- feature map.
316
- """
317
- feat_h, feat_w = featmap_size
318
- valid_h, valid_w = valid_size
319
- assert valid_h <= feat_h and valid_w <= feat_w
320
- valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
321
- valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
322
- valid_x[:valid_w] = 1
323
- valid_y[:valid_h] = 1
324
- valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
325
- valid = valid_xx & valid_yy
326
- valid = valid[:, None].expand(valid.size(0),
327
- num_base_anchors).contiguous().view(-1)
328
- return valid
329
-
330
- def __repr__(self):
331
- """str: a string that describes the module"""
332
- indent_str = ' '
333
- repr_str = self.__class__.__name__ + '(\n'
334
- repr_str += f'{indent_str}strides={self.strides},\n'
335
- repr_str += f'{indent_str}ratios={self.ratios},\n'
336
- repr_str += f'{indent_str}scales={self.scales},\n'
337
- repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
338
- repr_str += f'{indent_str}scale_major={self.scale_major},\n'
339
- repr_str += f'{indent_str}octave_base_scale='
340
- repr_str += f'{self.octave_base_scale},\n'
341
- repr_str += f'{indent_str}scales_per_octave='
342
- repr_str += f'{self.scales_per_octave},\n'
343
- repr_str += f'{indent_str}num_levels={self.num_levels}\n'
344
- repr_str += f'{indent_str}centers={self.centers},\n'
345
- repr_str += f'{indent_str}center_offset={self.center_offset})'
346
- return repr_str
347
-
348
-
349
- @ANCHOR_GENERATORS.register_module()
350
- class SSDAnchorGenerator(AnchorGenerator):
351
- """Anchor generator for SSD.
352
-
353
- Args:
354
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
355
- in multiple feature levels.
356
- ratios (list[float]): The list of ratios between the height and width
357
- of anchors in a single level.
358
- basesize_ratio_range (tuple(float)): Ratio range of anchors.
359
- input_size (int): Size of feature map, 300 for SSD300,
360
- 512 for SSD512.
361
- scale_major (bool): Whether to multiply scales first when generating
362
- base anchors. If true, the anchors in the same row will have the
363
- same scales. It is always set to be False in SSD.
364
- """
365
-
366
- def __init__(self,
367
- strides,
368
- ratios,
369
- basesize_ratio_range,
370
- input_size=300,
371
- scale_major=True):
372
- assert len(strides) == len(ratios)
373
- assert mmcv.is_tuple_of(basesize_ratio_range, float)
374
-
375
- self.strides = [_pair(stride) for stride in strides]
376
- self.input_size = input_size
377
- self.centers = [(stride[0] / 2., stride[1] / 2.)
378
- for stride in self.strides]
379
- self.basesize_ratio_range = basesize_ratio_range
380
-
381
- # calculate anchor ratios and sizes
382
- min_ratio, max_ratio = basesize_ratio_range
383
- min_ratio = int(min_ratio * 100)
384
- max_ratio = int(max_ratio * 100)
385
- step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
386
- min_sizes = []
387
- max_sizes = []
388
- for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
389
- min_sizes.append(int(self.input_size * ratio / 100))
390
- max_sizes.append(int(self.input_size * (ratio + step) / 100))
391
- if self.input_size == 300:
392
- if basesize_ratio_range[0] == 0.15: # SSD300 COCO
393
- min_sizes.insert(0, int(self.input_size * 7 / 100))
394
- max_sizes.insert(0, int(self.input_size * 15 / 100))
395
- elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
396
- min_sizes.insert(0, int(self.input_size * 10 / 100))
397
- max_sizes.insert(0, int(self.input_size * 20 / 100))
398
- else:
399
- raise ValueError(
400
- 'basesize_ratio_range[0] should be either 0.15'
401
- 'or 0.2 when input_size is 300, got '
402
- f'{basesize_ratio_range[0]}.')
403
- elif self.input_size == 512:
404
- if basesize_ratio_range[0] == 0.1: # SSD512 COCO
405
- min_sizes.insert(0, int(self.input_size * 4 / 100))
406
- max_sizes.insert(0, int(self.input_size * 10 / 100))
407
- elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
408
- min_sizes.insert(0, int(self.input_size * 7 / 100))
409
- max_sizes.insert(0, int(self.input_size * 15 / 100))
410
- else:
411
- raise ValueError('basesize_ratio_range[0] should be either 0.1'
412
- 'or 0.15 when input_size is 512, got'
413
- f' {basesize_ratio_range[0]}.')
414
- else:
415
- raise ValueError('Only support 300 or 512 in SSDAnchorGenerator'
416
- f', got {self.input_size}.')
417
-
418
- anchor_ratios = []
419
- anchor_scales = []
420
- for k in range(len(self.strides)):
421
- scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
422
- anchor_ratio = [1.]
423
- for r in ratios[k]:
424
- anchor_ratio += [1 / r, r] # 4 or 6 ratio
425
- anchor_ratios.append(torch.Tensor(anchor_ratio))
426
- anchor_scales.append(torch.Tensor(scales))
427
-
428
- self.base_sizes = min_sizes
429
- self.scales = anchor_scales
430
- self.ratios = anchor_ratios
431
- self.scale_major = scale_major
432
- self.center_offset = 0
433
- self.base_anchors = self.gen_base_anchors()
434
-
435
- def gen_base_anchors(self):
436
- """Generate base anchors.
437
-
438
- Returns:
439
- list(torch.Tensor): Base anchors of a feature grid in multiple \
440
- feature levels.
441
- """
442
- multi_level_base_anchors = []
443
- for i, base_size in enumerate(self.base_sizes):
444
- base_anchors = self.gen_single_level_base_anchors(
445
- base_size,
446
- scales=self.scales[i],
447
- ratios=self.ratios[i],
448
- center=self.centers[i])
449
- indices = list(range(len(self.ratios[i])))
450
- indices.insert(1, len(indices))
451
- base_anchors = torch.index_select(base_anchors, 0,
452
- torch.LongTensor(indices))
453
- multi_level_base_anchors.append(base_anchors)
454
- return multi_level_base_anchors
455
-
456
- def __repr__(self):
457
- """str: a string that describes the module"""
458
- indent_str = ' '
459
- repr_str = self.__class__.__name__ + '(\n'
460
- repr_str += f'{indent_str}strides={self.strides},\n'
461
- repr_str += f'{indent_str}scales={self.scales},\n'
462
- repr_str += f'{indent_str}scale_major={self.scale_major},\n'
463
- repr_str += f'{indent_str}input_size={self.input_size},\n'
464
- repr_str += f'{indent_str}scales={self.scales},\n'
465
- repr_str += f'{indent_str}ratios={self.ratios},\n'
466
- repr_str += f'{indent_str}num_levels={self.num_levels},\n'
467
- repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
468
- repr_str += f'{indent_str}basesize_ratio_range='
469
- repr_str += f'{self.basesize_ratio_range})'
470
- return repr_str
471
-
472
-
473
- @ANCHOR_GENERATORS.register_module()
474
- class LegacyAnchorGenerator(AnchorGenerator):
475
- """Legacy anchor generator used in MMDetection V1.x.
476
-
477
- Note:
478
- Difference to the V2.0 anchor generator:
479
-
480
- 1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
481
- 2. The width/height are minused by 1 when calculating the anchors' \
482
- centers and corners to meet the V1.x coordinate system.
483
- 3. The anchors' corners are quantized.
484
-
485
- Args:
486
- strides (list[int] | list[tuple[int]]): Strides of anchors
487
- in multiple feature levels.
488
- ratios (list[float]): The list of ratios between the height and width
489
- of anchors in a single level.
490
- scales (list[int] | None): Anchor scales for anchors in a single level.
491
- It cannot be set at the same time if `octave_base_scale` and
492
- `scales_per_octave` are set.
493
- base_sizes (list[int]): The basic sizes of anchors in multiple levels.
494
- If None is given, strides will be used to generate base_sizes.
495
- scale_major (bool): Whether to multiply scales first when generating
496
- base anchors. If true, the anchors in the same row will have the
497
- same scales. By default it is True in V2.0
498
- octave_base_scale (int): The base scale of octave.
499
- scales_per_octave (int): Number of scales for each octave.
500
- `octave_base_scale` and `scales_per_octave` are usually used in
501
- retinanet and the `scales` should be None when they are set.
502
- centers (list[tuple[float, float]] | None): The centers of the anchor
503
- relative to the feature grid center in multiple feature levels.
504
- By default it is set to be None and not used. If a list of float
505
- is given, this list will be used to shift the centers of anchors.
506
- center_offset (float): The offset of center in proportion to anchors'
507
- width and height. By default it is 0.5 in V2.0 but it should be 0.5
508
- in v1.x models.
509
-
510
- Examples:
511
- >>> from mmdet.core import LegacyAnchorGenerator
512
- >>> self = LegacyAnchorGenerator(
513
- >>> [16], [1.], [1.], [9], center_offset=0.5)
514
- >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
515
- >>> print(all_anchors)
516
- [tensor([[ 0., 0., 8., 8.],
517
- [16., 0., 24., 8.],
518
- [ 0., 16., 8., 24.],
519
- [16., 16., 24., 24.]])]
520
- """
521
-
522
- def gen_single_level_base_anchors(self,
523
- base_size,
524
- scales,
525
- ratios,
526
- center=None):
527
- """Generate base anchors of a single level.
528
-
529
- Note:
530
- The width/height of anchors are minused by 1 when calculating \
531
- the centers and corners to meet the V1.x coordinate system.
532
-
533
- Args:
534
- base_size (int | float): Basic size of an anchor.
535
- scales (torch.Tensor): Scales of the anchor.
536
- ratios (torch.Tensor): The ratio between the height
537
- and width of anchors in a single level.
538
- center (tuple[float], optional): The center of the base anchor
539
- related to a single feature grid. Defaults to None.
540
-
541
- Returns:
542
- torch.Tensor: Anchors in a single-level feature map.
543
- """
544
- w = base_size
545
- h = base_size
546
- if center is None:
547
- x_center = self.center_offset * (w - 1)
548
- y_center = self.center_offset * (h - 1)
549
- else:
550
- x_center, y_center = center
551
-
552
- h_ratios = torch.sqrt(ratios)
553
- w_ratios = 1 / h_ratios
554
- if self.scale_major:
555
- ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
556
- hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
557
- else:
558
- ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
559
- hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
560
-
561
- # use float anchor and the anchor's center is aligned with the
562
- # pixel center
563
- base_anchors = [
564
- x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
565
- x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
566
- ]
567
- base_anchors = torch.stack(base_anchors, dim=-1).round()
568
-
569
- return base_anchors
570
-
571
-
572
- @ANCHOR_GENERATORS.register_module()
573
- class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
574
- """Legacy anchor generator used in MMDetection V1.x.
575
-
576
- The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
577
- can be found in `LegacyAnchorGenerator`.
578
- """
579
-
580
- def __init__(self,
581
- strides,
582
- ratios,
583
- basesize_ratio_range,
584
- input_size=300,
585
- scale_major=True):
586
- super(LegacySSDAnchorGenerator,
587
- self).__init__(strides, ratios, basesize_ratio_range, input_size,
588
- scale_major)
589
- self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
590
- for stride in strides]
591
- self.base_anchors = self.gen_base_anchors()
592
-
593
-
594
- @ANCHOR_GENERATORS.register_module()
595
- class YOLOAnchorGenerator(AnchorGenerator):
596
- """Anchor generator for YOLO.
597
-
598
- Args:
599
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
600
- in multiple feature levels.
601
- base_sizes (list[list[tuple[int, int]]]): The basic sizes
602
- of anchors in multiple levels.
603
- """
604
-
605
- def __init__(self, strides, base_sizes):
606
- self.strides = [_pair(stride) for stride in strides]
607
- self.centers = [(stride[0] / 2., stride[1] / 2.)
608
- for stride in self.strides]
609
- self.base_sizes = []
610
- num_anchor_per_level = len(base_sizes[0])
611
- for base_sizes_per_level in base_sizes:
612
- assert num_anchor_per_level == len(base_sizes_per_level)
613
- self.base_sizes.append(
614
- [_pair(base_size) for base_size in base_sizes_per_level])
615
- self.base_anchors = self.gen_base_anchors()
616
-
617
- @property
618
- def num_levels(self):
619
- """int: number of feature levels that the generator will be applied"""
620
- return len(self.base_sizes)
621
-
622
- def gen_base_anchors(self):
623
- """Generate base anchors.
624
-
625
- Returns:
626
- list(torch.Tensor): Base anchors of a feature grid in multiple \
627
- feature levels.
628
- """
629
- multi_level_base_anchors = []
630
- for i, base_sizes_per_level in enumerate(self.base_sizes):
631
- center = None
632
- if self.centers is not None:
633
- center = self.centers[i]
634
- multi_level_base_anchors.append(
635
- self.gen_single_level_base_anchors(base_sizes_per_level,
636
- center))
637
- return multi_level_base_anchors
638
-
639
- def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
640
- """Generate base anchors of a single level.
641
-
642
- Args:
643
- base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
644
- anchors.
645
- center (tuple[float], optional): The center of the base anchor
646
- related to a single feature grid. Defaults to None.
647
-
648
- Returns:
649
- torch.Tensor: Anchors in a single-level feature maps.
650
- """
651
- x_center, y_center = center
652
- base_anchors = []
653
- for base_size in base_sizes_per_level:
654
- w, h = base_size
655
-
656
- # use float anchor and the anchor's center is aligned with the
657
- # pixel center
658
- base_anchor = torch.Tensor([
659
- x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
660
- y_center + 0.5 * h
661
- ])
662
- base_anchors.append(base_anchor)
663
- base_anchors = torch.stack(base_anchors, dim=0)
664
-
665
- return base_anchors
666
-
667
- def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
668
- """Generate responsible anchor flags of grid cells in multiple scales.
669
-
670
- Args:
671
- featmap_sizes (list(tuple)): List of feature map sizes in multiple
672
- feature levels.
673
- gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
674
- device (str): Device where the anchors will be put on.
675
-
676
- Return:
677
- list(torch.Tensor): responsible flags of anchors in multiple level
678
- """
679
- assert self.num_levels == len(featmap_sizes)
680
- multi_level_responsible_flags = []
681
- for i in range(self.num_levels):
682
- anchor_stride = self.strides[i]
683
- flags = self.single_level_responsible_flags(
684
- featmap_sizes[i],
685
- gt_bboxes,
686
- anchor_stride,
687
- self.num_base_anchors[i],
688
- device=device)
689
- multi_level_responsible_flags.append(flags)
690
- return multi_level_responsible_flags
691
-
692
- def single_level_responsible_flags(self,
693
- featmap_size,
694
- gt_bboxes,
695
- stride,
696
- num_base_anchors,
697
- device='cuda'):
698
- """Generate the responsible flags of anchor in a single feature map.
699
-
700
- Args:
701
- featmap_size (tuple[int]): The size of feature maps.
702
- gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
703
- stride (tuple(int)): stride of current level
704
- num_base_anchors (int): The number of base anchors.
705
- device (str, optional): Device where the flags will be put on.
706
- Defaults to 'cuda'.
707
-
708
- Returns:
709
- torch.Tensor: The valid flags of each anchor in a single level \
710
- feature map.
711
- """
712
- feat_h, feat_w = featmap_size
713
- gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
714
- gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
715
- gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
716
- gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
717
-
718
- # row major indexing
719
- gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
720
-
721
- responsible_grid = torch.zeros(
722
- feat_h * feat_w, dtype=torch.uint8, device=device)
723
- responsible_grid[gt_bboxes_grid_idx] = 1
724
-
725
- responsible_grid = responsible_grid[:, None].expand(
726
- responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
727
- return responsible_grid
 
spaces/CVPR/WALT/mmdet/models/dense_heads/rpn_test_mixin.py DELETED
@@ -1,59 +0,0 @@
1
- import sys
2
-
3
- from mmdet.core import merge_aug_proposals
4
-
5
- if sys.version_info >= (3, 7):
6
- from mmdet.utils.contextmanagers import completed
7
-
8
-
9
- class RPNTestMixin(object):
10
- """Test methods of RPN."""
11
-
12
- if sys.version_info >= (3, 7):
13
-
14
- async def async_simple_test_rpn(self, x, img_metas):
15
- sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
16
- async with completed(
17
- __name__, 'rpn_head_forward',
18
- sleep_interval=sleep_interval):
19
- rpn_outs = self(x)
20
-
21
- proposal_list = self.get_bboxes(*rpn_outs, img_metas)
22
- return proposal_list
23
-
24
- def simple_test_rpn(self, x, img_metas):
25
- """Test without augmentation.
26
-
27
- Args:
28
- x (tuple[Tensor]): Features from the upstream network, each is
29
- a 4D-tensor.
30
- img_metas (list[dict]): Meta info of each image.
31
-
32
- Returns:
33
- list[Tensor]: Proposals of each image.
34
- """
35
- rpn_outs = self(x)
36
- proposal_list = self.get_bboxes(*rpn_outs, img_metas)
37
- return proposal_list
38
-
39
- def aug_test_rpn(self, feats, img_metas):
40
- samples_per_gpu = len(img_metas[0])
41
- aug_proposals = [[] for _ in range(samples_per_gpu)]
42
- for x, img_meta in zip(feats, img_metas):
43
- proposal_list = self.simple_test_rpn(x, img_meta)
44
- for i, proposals in enumerate(proposal_list):
45
- aug_proposals[i].append(proposals)
46
- # reorganize the order of 'img_metas' to match the dimensions
47
- # of 'aug_proposals'
48
- aug_img_metas = []
49
- for i in range(samples_per_gpu):
50
- aug_img_meta = []
51
- for j in range(len(img_metas)):
52
- aug_img_meta.append(img_metas[j][i])
53
- aug_img_metas.append(aug_img_meta)
54
- # after merging, proposals will be rescaled to the original image size
55
- merged_proposals = [
56
- merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
57
- for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
58
- ]
59
- return merged_proposals
 
spaces/CVPR/transfiner/configs/Detectron1-Comparisons/README.md DELETED
@@ -1,84 +0,0 @@
1
-
2
- Detectron2 model zoo's experimental settings and a few implementation details are different from Detectron.
3
-
4
- The differences in implementation details are shared in
5
- [Compatibility with Other Libraries](../../docs/notes/compatibility.md).
6
-
7
- The differences in model zoo's experimental settings include:
8
- * Use scale augmentation during training. This improves AP with lower training cost.
9
- * Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may
10
- affect other AP.
11
- * Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP.
12
- * Use `ROIAlignV2`. This does not significantly affect AP.
13
-
14
- In this directory, we provide a few configs that __do not__ have the above changes.
15
- They mimic Detectron's behavior as close as possible,
16
- and provide a fair comparison of accuracy and speed against Detectron.
17
-
18
- <!--
19
- ./gen_html_table.py --config 'Detectron1-Comparisons/*.yaml' --name "Faster R-CNN" "Keypoint R-CNN" "Mask R-CNN" --fields lr_sched train_speed inference_speed mem box_AP mask_AP keypoint_AP --base-dir ../../../configs/Detectron1-Comparisons
20
- -->
21
-
22
-
23
- <table><tbody>
24
- <!-- START TABLE -->
25
- <!-- TABLE HEADER -->
26
- <th valign="bottom">Name</th>
27
- <th valign="bottom">lr<br/>sched</th>
28
- <th valign="bottom">train<br/>time<br/>(s/iter)</th>
29
- <th valign="bottom">inference<br/>time<br/>(s/im)</th>
30
- <th valign="bottom">train<br/>mem<br/>(GB)</th>
31
- <th valign="bottom">box<br/>AP</th>
32
- <th valign="bottom">mask<br/>AP</th>
33
- <th valign="bottom">kp.<br/>AP</th>
34
- <th valign="bottom">model id</th>
35
- <th valign="bottom">download</th>
36
- <!-- TABLE BODY -->
37
- <!-- ROW: faster_rcnn_R_50_FPN_noaug_1x -->
38
- <tr><td align="left"><a href="faster_rcnn_R_50_FPN_noaug_1x.yaml">Faster R-CNN</a></td>
39
- <td align="center">1x</td>
40
- <td align="center">0.219</td>
41
- <td align="center">0.038</td>
42
- <td align="center">3.1</td>
43
- <td align="center">36.9</td>
44
- <td align="center"></td>
45
- <td align="center"></td>
46
- <td align="center">137781054</td>
47
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x/137781054/model_final_7ab50c.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x/137781054/metrics.json">metrics</a></td>
48
- </tr>
49
- <!-- ROW: keypoint_rcnn_R_50_FPN_1x -->
50
- <tr><td align="left"><a href="keypoint_rcnn_R_50_FPN_1x.yaml">Keypoint R-CNN</a></td>
51
- <td align="center">1x</td>
52
- <td align="center">0.313</td>
53
- <td align="center">0.071</td>
54
- <td align="center">5.0</td>
55
- <td align="center">53.1</td>
56
- <td align="center"></td>
57
- <td align="center">64.2</td>
58
- <td align="center">137781195</td>
59
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x/137781195/model_final_cce136.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x/137781195/metrics.json">metrics</a></td>
60
- </tr>
61
- <!-- ROW: mask_rcnn_R_50_FPN_noaug_1x -->
62
- <tr><td align="left"><a href="mask_rcnn_R_50_FPN_noaug_1x.yaml">Mask R-CNN</a></td>
63
- <td align="center">1x</td>
64
- <td align="center">0.273</td>
65
- <td align="center">0.043</td>
66
- <td align="center">3.4</td>
67
- <td align="center">37.8</td>
68
- <td align="center">34.9</td>
69
- <td align="center"></td>
70
- <td align="center">137781281</td>
71
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x/137781281/model_final_62ca52.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x/137781281/metrics.json">metrics</a></td>
72
- </tr>
73
- </tbody></table>
74
-
75
- ## Comparisons:
76
-
77
- * Faster R-CNN: Detectron's AP is 36.7, similar to ours.
78
- * Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron
79
- [bug](https://github.com/facebookresearch/Detectron/issues/459) led to a drop in box AP, which can be
80
- compensated for by some parameter tuning.
81
- * Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation.
82
- See [this article](https://ppwwyyxx.com/blog/2021/Where-are-Pixels/) for details.
83
-
84
- For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html).
 
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/registry.py DELETED
@@ -1,66 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Grounding DINO
3
- # url: https://github.com/IDEA-Research/GroundingDINO
4
- # Copyright (c) 2023 IDEA. All Rights Reserved.
5
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- # ------------------------------------------------------------------------
7
- # -*- coding: utf-8 -*-
8
- # @Author: Yihao Chen
9
- # @Date: 2021-08-16 16:03:17
10
- # @Last Modified by: Shilong Liu
11
- # @Last Modified time: 2022-01-23 15:26
12
- # modified from mmcv
13
-
14
- import inspect
15
- from functools import partial
16
-
17
-
18
- class Registry(object):
19
- def __init__(self, name):
20
- self._name = name
21
- self._module_dict = dict()
22
-
23
- def __repr__(self):
24
- format_str = self.__class__.__name__ + "(name={}, items={})".format(
25
- self._name, list(self._module_dict.keys())
26
- )
27
- return format_str
28
-
29
- def __len__(self):
30
- return len(self._module_dict)
31
-
32
- @property
33
- def name(self):
34
- return self._name
35
-
36
- @property
37
- def module_dict(self):
38
- return self._module_dict
39
-
40
- def get(self, key):
41
- return self._module_dict.get(key, None)
42
-
43
- def registe_with_name(self, module_name=None, force=False):
44
- return partial(self.register, module_name=module_name, force=force)
45
-
46
- def register(self, module_build_function, module_name=None, force=False):
47
- """Register a module build function.
48
- Args:
49
- module (:obj:`nn.Module`): Module to be registered.
50
- """
51
- if not inspect.isfunction(module_build_function):
52
- raise TypeError(
53
- "module_build_function must be a function, but got {}".format(
54
- type(module_build_function)
55
- )
56
- )
57
- if module_name is None:
58
- module_name = module_build_function.__name__
59
- if not force and module_name in self._module_dict:
60
- raise KeyError("{} is already registered in {}".format(module_name, self.name))
61
- self._module_dict[module_name] = module_build_function
62
-
63
- return module_build_function
64
-
65
-
66
- MODULE_BUILD_FUNCS = Registry("model build functions")
 
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/predictor.py DELETED
@@ -1,269 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import numpy as np
8
- import torch
9
-
10
- from segment_anything.modeling import Sam
11
-
12
- from typing import Optional, Tuple
13
-
14
- from .utils.transforms import ResizeLongestSide
15
-
16
-
17
- class SamPredictor:
18
- def __init__(
19
- self,
20
- sam_model: Sam,
21
- ) -> None:
22
- """
23
- Uses SAM to calculate the image embedding for an image, and then
24
- allow repeated, efficient mask prediction given prompts.
25
-
26
- Arguments:
27
- sam_model (Sam): The model to use for mask prediction.
28
- """
29
- super().__init__()
30
- self.model = sam_model
31
- self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
32
- self.reset_image()
33
-
34
- def set_image(
35
- self,
36
- image: np.ndarray,
37
- image_format: str = "RGB",
38
- ) -> None:
39
- """
40
- Calculates the image embeddings for the provided image, allowing
41
- masks to be predicted with the 'predict' method.
42
-
43
- Arguments:
44
- image (np.ndarray): The image for calculating masks. Expects an
45
- image in HWC uint8 format, with pixel values in [0, 255].
46
- image_format (str): The color format of the image, in ['RGB', 'BGR'].
47
- """
48
- assert image_format in [
49
- "RGB",
50
- "BGR",
51
- ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
52
- if image_format != self.model.image_format:
53
- image = image[..., ::-1]
54
-
55
- # Transform the image to the form expected by the model
56
- input_image = self.transform.apply_image(image)
57
- input_image_torch = torch.as_tensor(input_image, device=self.device)
58
- input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
59
-
60
- self.set_torch_image(input_image_torch, image.shape[:2])
61
-
62
- @torch.no_grad()
63
- def set_torch_image(
64
- self,
65
- transformed_image: torch.Tensor,
66
- original_image_size: Tuple[int, ...],
67
- ) -> None:
68
- """
69
- Calculates the image embeddings for the provided image, allowing
70
- masks to be predicted with the 'predict' method. Expects the input
71
- image to be already transformed to the format expected by the model.
72
-
73
- Arguments:
74
- transformed_image (torch.Tensor): The input image, with shape
75
- 1x3xHxW, which has been transformed with ResizeLongestSide.
76
- original_image_size (tuple(int, int)): The size of the image
77
- before transformation, in (H, W) format.
78
- """
79
- assert (
80
- len(transformed_image.shape) == 4
81
- and transformed_image.shape[1] == 3
82
- and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
83
- ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
84
- self.reset_image()
85
-
86
- self.original_size = original_image_size
87
- self.input_size = tuple(transformed_image.shape[-2:])
88
- input_image = self.model.preprocess(transformed_image)
89
- self.features = self.model.image_encoder(input_image)
90
- self.is_image_set = True
91
-
92
- def predict(
93
- self,
94
- point_coords: Optional[np.ndarray] = None,
95
- point_labels: Optional[np.ndarray] = None,
96
- box: Optional[np.ndarray] = None,
97
- mask_input: Optional[np.ndarray] = None,
98
- multimask_output: bool = True,
99
- return_logits: bool = False,
100
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
101
- """
102
- Predict masks for the given input prompts, using the currently set image.
103
-
104
- Arguments:
105
- point_coords (np.ndarray or None): A Nx2 array of point prompts to the
106
- model. Each point is in (X,Y) in pixels.
107
- point_labels (np.ndarray or None): A length N array of labels for the
108
- point prompts. 1 indicates a foreground point and 0 indicates a
109
- background point.
110
- box (np.ndarray or None): A length 4 array given a box prompt to the
111
- model, in XYXY format.
112
- mask_input (np.ndarray): A low resolution mask input to the model, typically
113
- coming from a previous prediction iteration. Has form 1xHxW, where
114
- for SAM, H=W=256.
115
- multimask_output (bool): If true, the model will return three masks.
116
- For ambiguous input prompts (such as a single click), this will often
117
- produce better masks than a single prediction. If only a single
118
- mask is needed, the model's predicted quality score can be used
119
- to select the best mask. For non-ambiguous prompts, such as multiple
120
- input prompts, multimask_output=False can give better results.
121
- return_logits (bool): If true, returns un-thresholded mask logits
122
- instead of a binary mask.
123
-
124
- Returns:
125
- (np.ndarray): The output masks in CxHxW format, where C is the
126
- number of masks, and (H, W) is the original image size.
127
- (np.ndarray): An array of length C containing the model's
128
- predictions for the quality of each mask.
129
- (np.ndarray): An array of shape CxHxW, where C is the number
130
- of masks and H=W=256. These low resolution logits can be passed to
131
- a subsequent iteration as mask input.
132
- """
133
- if not self.is_image_set:
134
- raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
135
-
136
- # Transform input prompts
137
- coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
138
- if point_coords is not None:
139
- assert (
140
- point_labels is not None
141
- ), "point_labels must be supplied if point_coords is supplied."
142
- point_coords = self.transform.apply_coords(point_coords, self.original_size)
143
- coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
144
- labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
145
- coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
146
- if box is not None:
147
- box = self.transform.apply_boxes(box, self.original_size)
148
- box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
149
- box_torch = box_torch[None, :]
150
- if mask_input is not None:
151
- mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
152
- mask_input_torch = mask_input_torch[None, :, :, :]
153
-
154
- masks, iou_predictions, low_res_masks = self.predict_torch(
155
- coords_torch,
156
- labels_torch,
157
- box_torch,
158
- mask_input_torch,
159
- multimask_output,
160
- return_logits=return_logits,
161
- )
162
-
163
- masks = masks[0].detach().cpu().numpy()
164
- iou_predictions = iou_predictions[0].detach().cpu().numpy()
165
- low_res_masks = low_res_masks[0].detach().cpu().numpy()
166
- return masks, iou_predictions, low_res_masks
167
-
168
- @torch.no_grad()
169
- def predict_torch(
170
- self,
171
- point_coords: Optional[torch.Tensor],
172
- point_labels: Optional[torch.Tensor],
173
- boxes: Optional[torch.Tensor] = None,
174
- mask_input: Optional[torch.Tensor] = None,
175
- multimask_output: bool = True,
176
- return_logits: bool = False,
177
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
178
- """
179
- Predict masks for the given input prompts, using the currently set image.
180
- Input prompts are batched torch tensors and are expected to already be
181
- transformed to the input frame using ResizeLongestSide.
182
-
183
- Arguments:
184
- point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
185
- model. Each point is in (X,Y) in pixels.
186
- point_labels (torch.Tensor or None): A BxN array of labels for the
187
- point prompts. 1 indicates a foreground point and 0 indicates a
188
- background point.
189
- boxes (torch.Tensor or None): A Bx4 array giving box prompts to the
190
- model, in XYXY format.
191
- mask_input (torch.Tensor or None): A low resolution mask input to the model, typically
192
- coming from a previous prediction iteration. Has form Bx1xHxW, where
193
- for SAM, H=W=256. Masks returned by a previous iteration of the
194
- predict method do not need further transformation.
195
- multimask_output (bool): If true, the model will return three masks.
196
- For ambiguous input prompts (such as a single click), this will often
197
- produce better masks than a single prediction. If only a single
198
- mask is needed, the model's predicted quality score can be used
199
- to select the best mask. For non-ambiguous prompts, such as multiple
200
- input prompts, multimask_output=False can give better results.
201
- return_logits (bool): If true, returns un-thresholded mask logits
202
- instead of a binary mask.
203
-
204
- Returns:
205
- (torch.Tensor): The output masks in BxCxHxW format, where C is the
206
- number of masks, and (H, W) is the original image size.
207
- (torch.Tensor): An array of shape BxC containing the model's
208
- predictions for the quality of each mask.
209
- (torch.Tensor): An array of shape BxCxHxW, where C is the number
210
- of masks and H=W=256. These low res logits can be passed to
211
- a subsequent iteration as mask input.
212
- """
213
- if not self.is_image_set:
214
- raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
215
-
216
- if point_coords is not None:
217
- points = (point_coords, point_labels)
218
- else:
219
- points = None
220
-
221
- # Embed prompts
222
- sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
223
- points=points,
224
- boxes=boxes,
225
- masks=mask_input,
226
- )
227
-
228
- # Predict masks
229
- low_res_masks, iou_predictions = self.model.mask_decoder(
230
- image_embeddings=self.features,
231
- image_pe=self.model.prompt_encoder.get_dense_pe(),
232
- sparse_prompt_embeddings=sparse_embeddings,
233
- dense_prompt_embeddings=dense_embeddings,
234
- multimask_output=multimask_output,
235
- )
236
-
237
- # Upscale the masks to the original image resolution
238
- masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
239
-
240
- if not return_logits:
241
- masks = masks > self.model.mask_threshold
242
-
243
- return masks, iou_predictions, low_res_masks
244
-
245
- def get_image_embedding(self) -> torch.Tensor:
246
- """
247
- Returns the image embeddings for the currently set image, with
248
- shape 1xCxHxW, where C is the embedding dimension and (H,W) are
249
- the embedding spatial dimensions of SAM (typically C=256, H=W=64).
250
- """
251
- if not self.is_image_set:
252
- raise RuntimeError(
253
- "An image must be set with .set_image(...) to generate an embedding."
254
- )
255
- assert self.features is not None, "Features must exist if an image has been set."
256
- return self.features
257
-
258
- @property
259
- def device(self) -> torch.device:
260
- return self.model.device
261
-
262
- def reset_image(self) -> None:
263
- """Resets the currently set image."""
264
- self.is_image_set = False
265
- self.features = None
266
- self.orig_h = None
267
- self.orig_w = None
268
- self.input_h = None
269
- self.input_w = None
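
For reference, a minimal usage sketch of the predictor interface removed above. The `cv2` image read, image path, and click coordinates are illustrative assumptions; only the `set_image`/`predict` calls follow the docstrings in this file, and a `sam_model` instance is assumed to have been built elsewhere:

```python
import cv2
import numpy as np

# Assumes a SAM model instance (`sam_model`) was loaded elsewhere;
# SamPredictor is the class removed in this diff.
predictor = SamPredictor(sam_model)

# set_image expects an HWC uint8 image with values in [0, 255], RGB by default.
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)  # computes and caches the image embedding

# A single foreground click at pixel (x=500, y=375); label 1 = foreground.
masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[500, 375]]),
    point_labels=np.array([1]),
    multimask_output=True,  # return three candidate masks for an ambiguous prompt
)
best_mask = masks[np.argmax(scores)]  # CxHxW output; pick the highest-scoring mask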
 
spaces/Casio991ms/MathBot/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Math_Word_Problem_Solver
3
- emoji: 🏃
4
- colorFrom: gray
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 2.9.4
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/ClementBM/connectfour/connectfour/app.py DELETED
@@ -1,186 +0,0 @@
1
- import time
2
-
3
- import gradio as gr
4
- import numpy as np
5
- import onnxruntime as ort
6
- from pettingzoo.classic import connect_four_v3
7
-
8
- from connectfour import ERROR_SCREEN
9
- from models import MODEL_PATH
10
-
11
- # poetry export -f requirements.txt --output requirements.txt --without-hashes
12
- # gradio connectfour/app.py
13
-
14
- session = ort.InferenceSession(str(MODEL_PATH), None)
15
- demo = gr.Blocks()
16
-
17
- column_count = 7
18
- game_on_msg = "Game On"
19
-
20
-
21
- def flatten_observation(obs):
22
- flatten_action_mask = np.array(obs["action_mask"])
23
- flatten_observation = np.reshape(obs["observation"], 2 * 6 * column_count)
24
- flatten_obs = np.concatenate([flatten_action_mask, flatten_observation])
25
- return flatten_obs[np.newaxis, ...].astype(np.float32)
26
-
27
-
28
- def legal_moves(env, player_id):
29
- return np.arange(column_count)[env.observe(player_id)["action_mask"] == 1]
30
-
31
-
32
- def done(env):
33
- return np.any(list(env.terminations.values()) + list(env.truncations.values()))
34
-
35
-
36
- def get_state_msg(env, human):
37
- if done(env):
38
- end_message = "End of the game"
39
- if env.rewards[human] > 0:
40
- end_message += ": You WIN !!"
41
- elif env.rewards[human] < 0:
42
- end_message += ": You LOSE !!"
43
- return end_message
44
-
45
- return game_on_msg
46
-
47
-
48
- def play(env, human, action=None):
49
- try:
50
- if human != env.agent_selection:
51
- action = session.run(
52
- ["output"],
53
- {
54
- "obs": flatten_observation(env.observe(env.agent_selection)),
55
- "state_ins": [],
56
- },
57
- )
58
- action = int(np.argmax(action[0]))
59
-
60
- if action not in legal_moves(env, env.agent_selection):
61
- action = np.random.choice(legal_moves(env, env.agent_selection))
62
-
63
- env.step(action)
64
- return env, get_state_msg(env, human)
65
- except Exception:
66
- return env, "Restart the Game"
67
-
68
-
69
- def init_env(env, who_plays_first, human):
70
- env.reset()
71
-
72
- if who_plays_first != "You":
73
- play(env, human)
74
-
75
- return env
76
-
77
-
78
- def error_screen():
79
- with open(ERROR_SCREEN, "rb") as f:
80
- error_screen = np.load(f)
81
- return error_screen
82
-
83
-
84
- def create_env():
85
- return init_env(connect_four_v3.env(render_mode="rgb_array"), "You", "player_0")
86
-
87
-
88
- with demo:
89
- human = gr.State("player_0")
90
- env = gr.State(create_env())
91
-
92
- drop_token_btns = []
93
-
94
- with gr.Row():
95
- with gr.Column(scale=1):
96
- gr.Markdown("# Let's Play Connect Four !")
97
-
98
- who_plays_first = gr.Radio(
99
- label="Who plays first", choices=["You", "Bot"], value="You"
100
- )
101
- reinitialize = gr.Button("New Game")
102
- game_state = gr.Text(value=game_on_msg, interactive=False, label="Status")
103
-
104
- with gr.Column(scale=1):
105
- output = gr.Image(
106
- label="Connect Four Grid",
107
- type="numpy",
108
- show_label=False,
109
- value=error_screen(),
110
- )
111
-
112
- with gr.Row():
113
- for i in range(column_count):
114
- with gr.Column(min_width=20):
115
- drop_token_btns.append(gr.Button("X", elem_id=i))
116
-
117
- def reinit_game(env, who_plays_first, human):
118
- env = init_env(env, who_plays_first, human)
119
- return [
120
- env,
121
- env.agent_selection, # human
122
- get_state_msg(env, human), # state_msg
123
- gr.Radio.update(interactive=True), # who_plays_first
124
- ]
125
-
126
- def on_render_change(env):
127
- return env.render()
128
-
129
- def wait(game_state_value):
130
- if game_state_value == game_on_msg:
131
- time.sleep(0.7)
132
- return gr.Radio.update(interactive=False)
133
- else:
134
- return gr.Radio.update(interactive=True)
135
-
136
- def bot(env, game_state_value, human):
137
- if game_state_value == game_on_msg:
138
- env, state_msg = play(env, human)
139
- if state_msg == game_on_msg:
140
- return state_msg, gr.Radio.update(interactive=False)
141
- else:
142
- return state_msg, gr.Radio.update(interactive=True)
143
- return (
144
- game_state_value,
145
- gr.Radio.update(interactive=True),
146
- )
147
-
148
- def click_column(env, human, evt: gr.EventData):
149
- env, state_msg = play(env, human, int(evt.target.elem_id))
150
- return env, state_msg
151
-
152
- def game_state_change(value):
153
- return [gr.Button.update(interactive=value == game_on_msg)] * column_count
154
-
155
- who_plays_first.change(
156
- reinit_game,
157
- [env, who_plays_first, human],
158
- outputs=[env, human, game_state, who_plays_first],
159
- ).then(on_render_change, inputs=[env], outputs=[output])
160
-
161
- reinitialize.click(
162
- reinit_game,
163
- [env, who_plays_first, human],
164
- outputs=[env, human, game_state, who_plays_first],
165
- ).then(on_render_change, inputs=[env], outputs=[output])
166
-
167
- for i in range(column_count):
168
- drop_token_btns[i].click(
169
- click_column,
170
- inputs=[env, human],
171
- outputs=[env, game_state],
172
- ).then(on_render_change, inputs=[env], outputs=[output]).then(
173
- wait, inputs=[game_state], outputs=[who_plays_first]
174
- ).then(
175
- bot, inputs=[env, game_state, human], outputs=[game_state, who_plays_first]
176
- ).then(
177
- on_render_change, inputs=[env], outputs=[output]
178
- )
179
-
180
- game_state.change(
181
- game_state_change,
182
- game_state,
183
- outputs=drop_token_btns,
184
- )
185
-
186
- demo.launch()
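
A minimal sketch of querying the exported policy outside the Gradio UI, reusing the input layout built by `flatten_observation` above (an all-zero "empty board" observation is used purely for illustration):

```python
import numpy as np
import onnxruntime as ort

from models import MODEL_PATH  # same ONNX checkpoint used by the app above

session = ort.InferenceSession(str(MODEL_PATH), None)

# flatten_observation concatenates the 7-entry action mask with the
# flattened 2x6x7 observation planes and adds a batch axis (float32).
action_mask = np.ones(7, dtype=np.float32)       # every column is playable
planes = np.zeros(2 * 6 * 7, dtype=np.float32)   # empty board
obs = np.concatenate([action_mask, planes])[np.newaxis, ...]

logits = session.run(["output"], {"obs": obs, "state_ins": []})[0]
print(int(np.argmax(logits)))  # column the bot would choose
```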
 
spaces/Cong723/gpt-academic-public/crazy_functions/__init__.py DELETED
File without changes
spaces/Cpp4App/Cpp4App/SEM/P1_PP_processing.py DELETED
@@ -1,120 +0,0 @@
1
- import os
2
- import time
3
- import shutil
4
-
5
- from bs4 import BeautifulSoup
6
-
7
- from find_subtitle import find_title_Label
8
- from get_text import write_text, write_text_without_label, removeUnneccessaryElements, makeCoarseSegments
9
- from types_pp_processing import caculateSim, getSentences, getSentences_no_classifier, getSentences_with_classifier
10
- # from children_pp_processing import process_specialGroup
11
- # from region_pp_processing import get_alifornia
12
- # from retention_pp_processing import retention_process
13
- # from clean_txt import cleaning_txt
14
-
15
- if __name__ == '__main__':
16
- # INPUT = "../dataset/privacy_policies_html/"
17
- INPUT = "./pp_example/"
18
- # cleaning_txt("./txt")
19
- # os.mkdir("./txt")
20
- if os.path.exists("./txt"):
21
- shutil.rmtree("./txt")
22
- os.makedirs("./txt")
23
-
24
- for file in os.listdir(INPUT):
25
-
26
- segmentation_start_time = time.perf_counter()
27
-
28
- pathName = os.path.basename(file)
29
- if pathName == ".DS_Store":
30
- continue
31
- path = INPUT+pathName
32
- label = find_title_Label(path)
33
- print("The current file is:" + pathName)
34
-
35
- # if pathName != '20.html':
36
- # continue
37
-
38
- para_start_time = time.perf_counter()
39
- soup = BeautifulSoup(open(path,encoding='utf-8'), features="html.parser")
40
- title_list = soup.find_all(label)
41
- # cleaning_txt()
42
-
43
- if not os.path.exists('./txt/' + pathName[:-5]):
44
- os.mkdir('./txt/' + pathName[:-5])
45
-
46
- if len(title_list) == 0 or pathName == '20.html' or pathName == '29.html' or pathName == '25.html' or pathName == '8.html' or pathName == '27.html' or pathName == '28.html':
47
- # write_text_without_label(soup.getText(), pathName)
48
- removeUnneccessaryElements(soup)
49
- result = makeCoarseSegments(soup)
50
- for seg in result:
51
- with open('./txt/' + pathName[:-5] + '/data_types.txt', "a", encoding='utf-8') as f:
52
-
53
- f.write(seg)
54
- f.write("\n")
55
- else:
56
- write_text(title_list, pathName)
57
- print("Paragraph level processing time: %2.2f s" % (time.clock() - para_start_time))
58
-
59
- for t in title_list:
60
- with open('./txt/' + pathName[:-5] + '/headings.txt', "a", encoding='utf-8') as g:
61
- g.write(str(t))
62
- g.write("\n")
63
-
64
- # data types
65
- if not os.path.exists("./txt/"+pathName[:-5]+"/data_types.txt"):
66
- print("No information about data types!")
67
- else:
68
- sen_start_time = time.perf_counter()
69
- # all_types = caculateSim("./txt/"+pathName[:-5]+"/data_types.txt")
70
- dict_sentences, dict_index = getSentences_with_classifier("./txt/" + pathName[:-5] + "/data_types.txt")
71
- print("sentence level processing time: %2.2f s" % (time.clock() - sen_start_time))
72
-
73
- os.makedirs("./txt/"+pathName[:-5]+"/classified_sentences")
74
- for key in dict_sentences:
75
-
76
- if dict_sentences[key] == "":
77
- continue
78
- with open('./txt/' + pathName[:-5] + "/classified_sentences/" + key + ".txt", "a", encoding='utf-8') as g:
79
- g.write(dict_sentences[key])
80
-
81
- for key in dict_index:
82
- with open('./txt/' + pathName[:-5] + "/classified_sentences/keyword_index.txt", "a", encoding='utf-8') as f:
83
- f.write(key + ":" + str(dict_index[key]) + "\n")
84
-
85
-
86
- # #children
87
- # if not os.path.exists("./txt/"+pathName[:-5]+"/children.txt"):
88
- # print("No information about children!")
89
- # else:
90
- # age , rule, childUse, specialGroup = process_specialGroup("./txt/"+pathName[:-5]+"/children.txt")
91
- # # print("children age is :")
92
- # print("D.CHILDREN.age : " + str(age))
93
- # if childUse == 1:
94
- # print(" the skill’s privacy policy states that it does not collect any information from children")
95
- # print("D.CHILDREN.[CTypes] = [ ]")
96
- # else:
97
- # # print("D.CHILDREN.[CTypes] :" + str(all_types))
98
- # None
99
- # #region
100
- # if not os.path.exists("./txt/"+pathName[:-5]+"/region.txt"):
101
- # print("No information about region!")
102
- # else:
103
- # specialArea,california = get_alifornia("./txt/"+pathName[:-5]+"/region.txt")
104
- # if california == 1:
105
- # print("D.REGIONS.region :California")
106
- # print("D.REGIONS.delete : Yes")
107
- # else:
108
- # print("D.REGIONS.region :No mention")
109
- # print("D.REGIONS.delete : No")
110
- #
111
- # #retention
112
- # if not os.path.exists("./txt/"+pathName[:-5]+"/data_retention.txt"):
113
- # print("No information about data retention!")
114
- # else:
115
- # retention_time, text = retention_process("./txt/"+pathName[:-5]+"/data_retention.txt")
116
- # print("D.RETENTION.period :"+ retention_time)
117
- # # cleaning_txt()
118
- # print("-------------------------------------------------------")
119
-
120
- print("time cost for segmentation: %2.2f s" % (time.clock() - segmentation_start_time))
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/matcher.py DELETED
@@ -1,112 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
- import torch
3
-
4
-
5
- class Matcher(object):
6
- """
7
- This class assigns to each predicted "element" (e.g., a box) a ground-truth
8
- element. Each predicted element will have exactly zero or one matches; each
9
- ground-truth element may be assigned to zero or more predicted elements.
10
-
11
- Matching is based on the MxN match_quality_matrix, which characterizes how well
13
- each (ground-truth, predicted) pair matches. For example, if the elements are
13
- boxes, the matrix may contain box IoU overlap values.
14
-
15
- The matcher returns a tensor of size N containing the index of the ground-truth
16
- element m that matches to prediction n. If there is no match, a negative value
17
- is returned.
18
- """
19
-
20
- BELOW_LOW_THRESHOLD = -1
21
- BETWEEN_THRESHOLDS = -2
22
-
23
- def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
24
- """
25
- Args:
26
- high_threshold (float): quality values greater than or equal to
27
- this value are candidate matches.
28
- low_threshold (float): a lower quality threshold used to stratify
29
- matches into three levels:
30
- 1) matches >= high_threshold
31
- 2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
32
- 3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
33
- allow_low_quality_matches (bool): if True, produce additional matches
34
- for predictions that have only low-quality match candidates. See
35
- set_low_quality_matches_ for more details.
36
- """
37
- assert low_threshold <= high_threshold
38
- self.high_threshold = high_threshold
39
- self.low_threshold = low_threshold
40
- self.allow_low_quality_matches = allow_low_quality_matches
41
-
42
- def __call__(self, match_quality_matrix):
43
- """
44
- Args:
45
- match_quality_matrix (Tensor[float]): an MxN tensor, containing the
46
- pairwise quality between M ground-truth elements and N predicted elements.
47
-
48
- Returns:
49
- matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
50
- [0, M - 1] or a negative value indicating that prediction i could not
51
- be matched.
52
- """
53
- if match_quality_matrix.numel() == 0:
54
- # empty targets or proposals not supported during training
55
- if match_quality_matrix.shape[0] == 0:
56
- raise ValueError(
57
- "No ground-truth boxes available for one of the images "
58
- "during training")
59
- else:
60
- raise ValueError(
61
- "No proposal boxes available for one of the images "
62
- "during training")
63
-
64
- # match_quality_matrix is M (gt) x N (predicted)
65
- # Max over gt elements (dim 0) to find best gt candidate for each prediction
66
- matched_vals, matches = match_quality_matrix.max(dim=0)
67
- if self.allow_low_quality_matches:
68
- all_matches = matches.clone()
69
-
70
- # Assign candidate matches with low quality to negative (unassigned) values
71
- below_low_threshold = matched_vals < self.low_threshold
72
- between_thresholds = (matched_vals >= self.low_threshold) & (
73
- matched_vals < self.high_threshold
74
- )
75
- matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD
76
- matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS
77
-
78
- if self.allow_low_quality_matches:
79
- self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
80
-
81
- return matches
82
-
83
- def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
84
- """
85
- Produce additional matches for predictions that have only low-quality matches.
86
- Specifically, for each ground-truth find the set of predictions that have
87
- maximum overlap with it (including ties); for each prediction in that set, if
88
- it is unmatched, then match it to the ground-truth with which it has the highest
89
- quality value.
90
- """
91
- # For each gt, find the prediction with which it has highest quality
92
- highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
93
- # Find highest quality match available, even if it is low, including ties
94
- gt_pred_pairs_of_highest_quality = torch.nonzero(
95
- match_quality_matrix == highest_quality_foreach_gt[:, None]
96
- )
97
- # Example gt_pred_pairs_of_highest_quality:
98
- # tensor([[ 0, 39796],
99
- # [ 1, 32055],
100
- # [ 1, 32070],
101
- # [ 2, 39190],
102
- # [ 2, 40255],
103
- # [ 3, 40390],
104
- # [ 3, 41455],
105
- # [ 4, 45470],
106
- # [ 5, 45325],
107
- # [ 5, 46390]])
108
- # Each row is a (gt index, prediction index)
109
- # Note how gt items 1, 2, 3, and 5 each have two ties
110
-
111
- pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
112
- matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
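
A small worked example of the threshold behaviour described in the docstrings above; the quality values are made up, but the expected output follows directly from `__call__`:

```python
import torch

# 2 ground-truth boxes (rows) x 4 predictions (columns) of match quality, e.g. IoU.
match_quality = torch.tensor([
    [0.80, 0.25, 0.05, 0.55],
    [0.10, 0.45, 0.02, 0.60],
])

matcher = Matcher(high_threshold=0.7, low_threshold=0.3, allow_low_quality_matches=False)
matches = matcher(match_quality)

# prediction 0: best quality 0.80 >= 0.7        -> matched to gt 0
# prediction 1: best quality 0.45 in [0.3, 0.7) -> BETWEEN_THRESHOLDS (-2)
# prediction 2: best quality 0.05 <  0.3        -> BELOW_LOW_THRESHOLD (-1)
# prediction 3: best quality 0.60 in [0.3, 0.7) -> BETWEEN_THRESHOLDS (-2)
print(matches)  # tensor([ 0, -2, -1, -2])
```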
 
spaces/Cyril666/my_abi/docker/Dockerfile DELETED
@@ -1,25 +0,0 @@
1
- FROM anibali/pytorch:cuda-9.0
2
- LABEL maintainer="fangshancheng <[email protected]>"
3
- RUN sudo rm -rf /etc/apt/sources.list.d && \
4
- sudo apt update && \
5
- sudo apt install -y build-essential vim && \
6
- conda config --add channels https://mirrors.ustc.edu.cn/anaconda/pkgs/free/ && \
7
- conda config --add channels https://mirrors.ustc.edu.cn/anaconda/pkgs/main/ && \
8
- conda config --set show_channel_urls yes && \
9
- pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/ && \
10
- pip install torch==1.1.0 torchvision==0.3.0 && \
11
- pip install fastai==1.0.60 && \
12
- pip install ipdb jupyter ipython lmdb editdistance tensorboardX natsort nltk && \
13
- conda uninstall -y --force pillow pil jpeg libtiff libjpeg-turbo && \
14
- pip uninstall -y pillow pil jpeg libtiff libjpeg-turbo && \
15
- conda install -yc conda-forge libjpeg-turbo && \
16
- CFLAGS="${CFLAGS} -mavx2" pip install --no-cache-dir --force-reinstall --no-binary :all: --compile pillow-simd==6.2.2.post1 && \
17
- conda install -y jpeg libtiff opencv && \
18
- sudo rm -rf /var/lib/apt/lists/* && \
19
- sudo rm -rf /tmp/* && \
20
- sudo rm -rf ~/.cache && \
21
- sudo apt clean all && \
22
- conda clean -y -a
23
- EXPOSE 8888
24
- ENV LANG C.UTF-8
25
- ENV LC_ALL C.UTF-8
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/QoiImagePlugin.py DELETED
@@ -1,105 +0,0 @@
1
- #
2
- # The Python Imaging Library.
3
- #
4
- # QOI support for PIL
5
- #
6
- # See the README file for information on usage and redistribution.
7
- #
8
-
9
- import os
10
-
11
- from . import Image, ImageFile
12
- from ._binary import i32be as i32
13
- from ._binary import o8
14
-
15
-
16
- def _accept(prefix):
17
- return prefix[:4] == b"qoif"
18
-
19
-
20
- class QoiImageFile(ImageFile.ImageFile):
21
- format = "QOI"
22
- format_description = "Quite OK Image"
23
-
24
- def _open(self):
25
- if not _accept(self.fp.read(4)):
26
- msg = "not a QOI file"
27
- raise SyntaxError(msg)
28
-
29
- self._size = tuple(i32(self.fp.read(4)) for i in range(2))
30
-
31
- channels = self.fp.read(1)[0]
32
- self.mode = "RGB" if channels == 3 else "RGBA"
33
-
34
- self.fp.seek(1, os.SEEK_CUR) # colorspace
35
- self.tile = [("qoi", (0, 0) + self._size, self.fp.tell(), None)]
36
-
37
-
38
- class QoiDecoder(ImageFile.PyDecoder):
39
- _pulls_fd = True
40
-
41
- def _add_to_previous_pixels(self, value):
42
- self._previous_pixel = value
43
-
44
- r, g, b, a = value
45
- hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
46
- self._previously_seen_pixels[hash_value] = value
47
-
48
- def decode(self, buffer):
49
- self._previously_seen_pixels = {}
50
- self._previous_pixel = None
51
- self._add_to_previous_pixels(b"".join(o8(i) for i in (0, 0, 0, 255)))
52
-
53
- data = bytearray()
54
- bands = Image.getmodebands(self.mode)
55
- while len(data) < self.state.xsize * self.state.ysize * bands:
56
- byte = self.fd.read(1)[0]
57
- if byte == 0b11111110: # QOI_OP_RGB
58
- value = self.fd.read(3) + o8(255)
59
- elif byte == 0b11111111: # QOI_OP_RGBA
60
- value = self.fd.read(4)
61
- else:
62
- op = byte >> 6
63
- if op == 0: # QOI_OP_INDEX
64
- op_index = byte & 0b00111111
65
- value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))
66
- elif op == 1: # QOI_OP_DIFF
67
- value = (
68
- (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)
69
- % 256,
70
- (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)
71
- % 256,
72
- (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,
73
- )
74
- value += (self._previous_pixel[3],)
75
- elif op == 2: # QOI_OP_LUMA
76
- second_byte = self.fd.read(1)[0]
77
- diff_green = (byte & 0b00111111) - 32
78
- diff_red = ((second_byte & 0b11110000) >> 4) - 8
79
- diff_blue = (second_byte & 0b00001111) - 8
80
-
81
- value = tuple(
82
- (self._previous_pixel[i] + diff_green + diff) % 256
83
- for i, diff in enumerate((diff_red, 0, diff_blue))
84
- )
85
- value += (self._previous_pixel[3],)
86
- elif op == 3: # QOI_OP_RUN
87
- run_length = (byte & 0b00111111) + 1
88
- value = self._previous_pixel
89
- if bands == 3:
90
- value = value[:3]
91
- data += value * run_length
92
- continue
93
- value = b"".join(o8(i) for i in value)
94
- self._add_to_previous_pixels(value)
95
-
96
- if bands == 3:
97
- value = value[:3]
98
- data += value
99
- self.set_as_raw(bytes(data))
100
- return -1, 0
101
-
102
-
103
- Image.register_open(QoiImageFile.format, QoiImageFile, _accept)
104
- Image.register_decoder("qoi", QoiDecoder)
105
- Image.register_extension(QoiImageFile.format, ".qoi")
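
A minimal sketch of how the registered plugin is exercised once Pillow imports this module; the `.qoi` path is illustrative:

```python
from PIL import Image

# Opening a .qoi file dispatches to QoiImageFile via the "qoif" magic bytes,
# and pixel data is decoded by the "qoi" decoder registered above.
with Image.open("sample.qoi") as im:        # path is an assumption
    print(im.format, im.mode, im.size)      # "QOI", "RGB"/"RGBA", (width, height)
    im.convert("RGB").save("sample.png")    # re-encode with a format Pillow can write
```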