Commit ccd43be
Parent(s): fde2cf7

Update parquet files (step 57 of 397)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/aicolors/typings/__init__.py +0 -9
- spaces/1phancelerku/anime-remove-background/Download 1 Pound by Brymo the Nigerian Singer Songwriter and Author.md +0 -83
- spaces/1phancelerku/anime-remove-background/Download quiz submissions in Canvas and use Quiz statistics to analyze them.md +0 -119
- spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +0 -553
- spaces/2ndelement/voicevox/voicevox_engine/setting/SettingLoader.py +0 -33
- spaces/4th3n4/TraDeX/app-plain.py +0 -957
- spaces/7hao/bingo/src/components/ui/dropdown-menu.tsx +0 -128
- spaces/AIFILMS/StyleGANEX/datasets/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/align.py +0 -115
- spaces/AIZ2H/04-Gradio-SOTA-Seq2Seq-AutoQA/README.md +0 -13
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/README.md +0 -50
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py +0 -172
- spaces/AdityaMahimkar/PlagiarismChecker/app.py +0 -48
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/warppipeline.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvasinput/CanvasInput.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/MenuSetInteractive.js +0 -45
- spaces/Alican/pixera/util/img2pixl.py +0 -115
- spaces/Amrrs/DragGan-Inversion/visualizer_drag_gradio_inversion.py +0 -1002
- spaces/AndreLie95/Diabetes_Risk_Prediction/app.py +0 -76
- spaces/Andy1621/uniformer_image_demo/uniformer.py +0 -366
- spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py +0 -10
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/midas_net.py +0 -76
- spaces/Apex-X/nono/roop/utilities.py +0 -149
- spaces/Ariharasudhan/YoloV5/utils/activations.py +0 -103
- spaces/Asahi402/White-box-Cartoonization/app.py +0 -108
- spaces/Audio-AGI/WavJourney/add_voice_preset.py +0 -21
- spaces/Banbri/zcvzcv/src/lib/computeSecretFingerprint.ts +0 -7
- spaces/Bart92/RVC_HF/infer/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +0 -91
- spaces/Bart92/RVC_HF/infer/lib/infer_pack/onnx_inference.py +0 -149
- spaces/Benson/text-generation/Examples/Apk Chicos Tropiezo Para Ipad.md +0 -63
- spaces/Benson/text-generation/Examples/Asfalto 8 - Juego De Carreras De Coches Apk.md +0 -80
- spaces/Benson/text-generation/Examples/Descargar 50 Cent 21.md +0 -108
- spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Max Galaxy Store.md +0 -54
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/winterm_test.py +0 -131
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/msvc9compiler.py +0 -832
- spaces/BramVanroy/spacey_conll/Dockerfile +0 -105
- spaces/CALM/Dashboard/streamlit_observable/frontend/build/static/js/2.b1c975ff.chunk.js +0 -0
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md +0 -45
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/data/clevr/clevr_extract_feat.py +0 -151
- spaces/CVPR/Dual-Key_Backdoor_Attacks/utils/data_tools.py +0 -135
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/temporary_buffer.h +0 -22
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/get_value.h +0 -44
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/tabulate.h +0 -23
- spaces/CVPR/WALT/walt/datasets/walt_synthetic.py +0 -781
- spaces/CVPR/lama-example/saicinpainting/training/__init__.py +0 -0
- spaces/CVPR/lama-example/saicinpainting/training/visualizers/colors.py +0 -76
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/utils.py +0 -608
- spaces/ChandraMohanNayal/AutoGPT/ui/api.py +0 -146
- spaces/Chintan-Donda/KKMS-KSSW-HF/src/mandi_price.py +0 -33
spaces/101-5/gpt4free/g4f/.v1/gpt4free/aicolors/typings/__init__.py
DELETED
@@ -1,9 +0,0 @@
-from dataclasses import dataclass
-
-
-@dataclass
-class AiColorsResponse:
-    background: str
-    primary: str
-    accent: str
-    text: str
spaces/1phancelerku/anime-remove-background/Download 1 Pound by Brymo the Nigerian Singer Songwriter and Author.md
DELETED
@@ -1,83 +0,0 @@
-<br />
-<h1>Brymo 1 Pound Mp3 Download: Everything You Need to Know</h1>
-<p>If you are a fan of Nigerian music, you have probably heard of Brymo, one of the most talented and versatile artists in the industry. His song 1 Pound is a masterpiece that showcases his unique style and voice. In this article, we will tell you everything you need to know about Brymo and 1 Pound, including who he is, what the song is about, how to download it and why you should listen to it.</p>
-<h2>brymo 1 pound mp3 download</h2><br /><p><b><b>Download File</b> --->>> <a href="https://jinyurl.com/2uNLLk">https://jinyurl.com/2uNLLk</a></b></p><br /><br />
-<h2>Who is Brymo?</h2>
-<p>Brymo is the stage name of Olawale Ashimi, a Nigerian singer, songwriter, composer and author. He was born on May 9, 1986 in Okokomaiko, Ojo, Lagos State. He started recording music in 1999 while in secondary school. He signed a record deal with Chocolate City in 2010 but left the label in 2013 due to contractual disputes. He has since released several albums independently, such as Merchants, Dealers & Slaves (2013), Tabula Rasa (2014), Klĭtôrĭs (2016), Oṣó (2018), Yellow (2020) and Theta (2022). He has also published three books: Oriri's Plight (2018), Verses (2020) and The Bad Tooth (2022). He is widely regarded as one of the best vocalists and lyricists in Nigeria. He has won several awards and nominations for his music, such as The Headies, Nigeria Entertainment Awards and All Africa Music Awards. He is known for his fusion of various genres, such as folk, pop, afrobeat, soul and rock. He sings mostly in English and Yoruba languages.</p>
-<h2>What is 1 Pound?</h2>
-<p>1 Pound is a song by Brymo that was released in 2021 as part of his compilation album Trance. The song is an alternative rock track that features electric guitars, drums and keyboards. The song is about Brymo's journey as an artist and his determination to succeed despite the challenges he faces. The title refers to the British currency that Brymo uses as a metaphor for his value and worth. The lyrics are motivational and empowering, as Brymo sings about his dreams, his struggles and his achievements. He also encourages his listeners to pursue their own goals and not give up on their passions. Some of the lines from the song are:</p>
-<blockquote>
-<p>"I'm worth more than a pound<br>
-But they don't wanna pay me<br>
-They say I'm too loud<br>
-But they don't wanna hear me"</p>
-<p>"I've been around<br>
-But they don't wanna see me<br>
-They say I'm too proud<br>
-But they don't wanna feel me"</p>
-<p>"I'm a star in the sky<br>
-But they don't wanna look up<br>
-They say I'm too high<br>
-But they don't wanna hook up"</p>
-</blockquote>
-<h2>How to download 1 Pound mp3?</h2>
-<p>If you want to download 1 Pound mp3 by Brymo, you have several options to choose from. You can either stream or download the song from various platforms online. Here are some of the ways you can get the song:</p>
-<ul>
-<li><strong>PraiseZion</strong>: This is a website that offers free downloads of gospel and inspirational songs from Nigerian artists. You can find 1 Pound by Brymo on this site by searching for the song title or the artist name. You can also browse through the categories and genres to find similar songs. To download the song, you need to click on the download button and wait for the file to be saved on your device. You can also play the song online before downloading it. The website also provides the lyrics and the video of the song for your convenience.</li>
-<li><strong>iTunes</strong>: This is a digital media store that offers music, podcasts, movies, TV shows and more. You can access iTunes from your computer, smartphone or tablet. You need to have an Apple ID and a payment method to purchase content from iTunes. You can find 1 Pound by Brymo on iTunes by searching for the song title or the artist name. You can also browse through the categories and genres to find similar songs. To download the song, you need to click on the buy button and confirm your purchase. The song will be added to your library and you can sync it with your other devices. You can also stream the song online using Apple Music.</li>
-<li><strong>Spotify</strong>: This is a streaming service that offers music, podcasts, playlists and more. You can access Spotify from your computer, smartphone or tablet. You need to have a Spotify account and an internet connection to use Spotify. You can find 1 Pound by Brymo on Spotify by searching for the song title or the artist name. You can also browse through the categories and genres to find similar songs. To download the song, you need to have a Spotify Premium subscription, which allows you to download up to 10,000 songs on five devices. You can also stream the song online using Spotify Free or Spotify Premium.</li>
-<li><strong>YouTube</strong>: This is a video-sharing platform that offers videos, music, live streams, channels and more. You can access YouTube from your computer, smartphone or tablet. You do not need an account to watch videos on YouTube, but you need one to upload, comment, like and subscribe. You can find 1 Pound by Brymo on YouTube by searching for the song title or the artist name. You can also browse through the categories and genres to find similar songs. To download the song, you need to use a third-party tool, such as YouTube Downloader, which allows you to convert and save YouTube videos as mp3 files on your device. You can also stream the song online using YouTube.</li>
-</ul>
-<h2>Why you should listen to 1 Pound?</h2>
-<p>Now that you know how to download 1 Pound mp3 by Brymo, you might be wondering why you should listen to it in the first place. Well, there are many reasons why this song is worth your time and attention. Here are some of them:</p>
-<ul>
-<li><strong>It is inspirational</strong>: The song is a motivational anthem that inspires you to chase your dreams and overcome your obstacles. It tells you that you are valuable and capable of achieving anything you set your mind to. It also reminds you that you are not alone in your journey and that there are people who support and appreciate you.</li>
-<li><strong>It is cultural</strong>: The song is a reflection of Brymo's Nigerian identity and heritage. It incorporates elements of his native language, Yoruba, as well as references to his country's history, politics and society. It also celebrates his African roots and expresses his pride in his continent.</li>
-<li><strong>It is artistic</strong>: The song is a showcase of Brymo's musical talent and creativity. It features his distinctive voice, which ranges from soft and smooth to powerful and passionate. It also displays his lyrical skills, which are poetic, witty and profound. It also demonstrates his musical versatility, which blends different genres and influences into a cohesive and original sound.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>In conclusion, 1 Pound by Brymo is a song that you should not miss out on. It is a song that tells you about Brymo's life story and his artistic vision. It is a song that teaches you about Nigerian culture and history. It is a song that motivates you to pursue your goals and passions. It is a song that entertains you with its catchy melody and captivating performance. It is a song that deserves your download and your listen.</p>
-<p>So what are you waiting for? Go ahead and download 1 Pound mp3 by Brymo today and enjoy this amazing track!</p>
-<p>brymo 1 pound song lyrics and video<br />
-brymo 1 pound audio download free<br />
-brymo 1 pound mp3 download praisezion<br />
-brymo 1 pound naija music download<br />
-brymo 1 pound latest song 2021<br />
-brymo 1 pound mp3 download tooxclusive<br />
-brymo 1 pound official video youtube<br />
-brymo 1 pound song meaning and review<br />
-brymo 1 pound mp3 download naijaloaded<br />
-brymo 1 pound album download zip<br />
-brymo 1 pound instrumental download<br />
-brymo 1 pound mp3 download fakaza<br />
-brymo 1 pound song download waploaded<br />
-brymo 1 pound mp3 download skull<br />
-brymo 1 pound remix ft davido<br />
-brymo 1 pound mp3 download mdundo<br />
-brymo 1 pound song download audiomack<br />
-brymo 1 pound mp3 download musicpleer<br />
-brymo 1 pound live performance video<br />
-brymo 1 pound mp3 download justnaija<br />
-brymo 1 pound song download pagalworld<br />
-brymo 1 pound mp3 download naijavibes<br />
-brymo 1 pound song download mr jatt<br />
-brymo 1 pound mp3 download waptrick<br />
-brymo 1 pound cover by johnny drille<br />
-brymo 1 pound mp3 download afrobeat.co.za<br />
-brymo 1 pound song download djpunjab<br />
-brymo 1 pound mp3 download flexyjam<br />
-brymo 1 pound acoustic version download<br />
-brymo 1 pound mp3 download zamusic</p>
-<h2>FAQs</h2>
-<h3>Q: When did Brymo release 1 Pound?</h3>
-<p>A: Brymo released 1 Pound in 2021 as part of his compilation album Trance.</p>
-<h3>Q: What does 1 Pound mean?</h3>
-<p>A: 1 Pound is a metaphor for Brymo's value and worth as an artist and as a person.</ p>A: 1 Pound is a metaphor for Brymo's value and worth as an artist and as a person. He uses the British currency to symbolize his international recognition and appeal.</p>
-<h3>Q: What genre is 1 Pound?</h3>
-<p>A: 1 Pound is an alternative rock song that features electric guitars, drums and keyboards. It is influenced by Brymo's diverse musical tastes and experiences.</p>
-<h3>Q: Where can I download 1 Pound mp3?</h3>
-<p>A: You can download 1 Pound mp3 from various platforms online, such as PraiseZion, iTunes, Spotify and YouTube. You can also stream the song online using these platforms.</p>
-<h3>Q: What are some of the benefits of listening to 1 Pound?</h3>
-<p>A: Some of the benefits of listening to 1 Pound are that it is inspirational, cultural and artistic. It inspires you to chase your dreams and overcome your obstacles. It teaches you about Nigerian culture and history. It entertains you with its catchy melody and captivating performance.</p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download quiz submissions in Canvas and use Quiz statistics to analyze them.md
DELETED
@@ -1,119 +0,0 @@
-<br />
-<h1>How to Download Quiz Submissions in Canvas</h1>
-<p>Canvas is a popular learning management system (LMS) that allows instructors and students to create, manage, and share online courses. Canvas offers various features and tools to enhance online learning, such as quizzes, assignments, discussions, grades, and more.</p>
-<p>Quizzes are one of the most common types of assessments in Canvas. They can be used to test students' knowledge, skills, or understanding of a topic. Quizzes can be created using either the legacy quiz tool or the new quiz tool in Canvas. Quizzes can have different question types, such as multiple choice, true/false, fill-in-the-blank, essay, etc.</p>
-<h2>canvas download quiz submissions</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://jinyurl.com/2uNTGs">https://jinyurl.com/2uNTGs</a></b></p><br /><br />
-<p>Sometimes, you might want to download quiz submissions from Canvas for various reasons. For example, you might want to:</p>
-<ul>
-<li>Analyze the quiz results offline or with another software</li>
-<li>Print or share the quiz responses with others</li>
-<li>Keep a backup or archive of the quiz submissions</li>
-<li>Review or grade the quiz submissions manually</li>
-</ul>
-<p>Downloading quiz submissions from Canvas can be beneficial, but it can also pose some challenges. For instance, you might encounter issues such as:</p>
-<ul>
-<li>Not being able to download quiz submissions for certain question types or formats</li>
-<li>Not being able to download quiz submissions with instructor annotations or feedback</li>
-<li>Not being able to download quiz submissions for group assignments or quizzes that are not graded</li>
-<li>Not being able to download quiz submissions for quizzes that have been concluded or deleted</li>
-<li>Not being able to view or open the downloaded files properly</li>
-</ul>
-<p>In this article, we will show you how to download quiz submissions in Canvas using different methods. We will also provide some tips and best practices for downloading quiz submissions effectively. Let's get started!</p>
-<h2>How to Download Quiz Submissions in Canvas Legacy Quizzes</h2>
-<p>The legacy quiz tool is the original quiz tool in Canvas. It allows you to create quizzes with various question types and settings. You can also view quiz statistics and reports for each quiz.</p>
-<p>To download quiz submissions in Canvas legacy quizzes, follow these steps:</p>
-<h3>Step 1: Go to the quiz page and click on "Download Submissions"</h3>
-<p>Navigate to your course site and click on "Quizzes" in the left-hand menu. Then, click on the name of the quiz that you want to download submissions for. On the right side of the screen, you will see a link that says " Download Submissions". Click on this link to start the download process.</p>
-<p>How to download quiz submissions in Canvas LMS<br />
-Canvas quiz submission download function<br />
-Downloading multiple-choice quiz answers in Canvas<br />
-Canvas quiz statistics vs quiz submission download<br />
-Download and re-upload marked quiz submissions in Canvas<br />
-Canvas bulk download of quiz submissions<br />
-Download quiz submissions as ZIP file in Canvas<br />
-Canvas download quiz submissions with images<br />
-Download quiz submissions by course in Canvas<br />
-Canvas download quiz submissions for offline grading<br />
-How to filter quiz submission download in Canvas<br />
-Canvas download quiz submissions for group assignments<br />
-Download annotated quiz submissions in Canvas<br />
-Canvas download quiz submissions with comments<br />
-Download quiz submissions with resubmissions in Canvas<br />
-Canvas download quiz submissions by date range<br />
-Download quiz submissions with rubrics in Canvas<br />
-Canvas download quiz submissions for new quizzes<br />
-Download quiz submissions with feedback in Canvas<br />
-Canvas download quiz submissions for legacy quizzes<br />
-How to open downloaded quiz submissions in Canvas<br />
-Canvas download quiz submissions with file names<br />
-Download quiz submissions with plagiarism reports in Canvas<br />
-Canvas download quiz submissions for different question types<br />
-Download quiz submissions with grades in Canvas<br />
-Canvas download quiz submissions for quizzes with multiple attempts<br />
-Download quiz submissions with late penalties in Canvas<br />
-Canvas download quiz submissions for quizzes with time limits<br />
-Download quiz submissions with partial credit in Canvas<br />
-Canvas download quiz submissions for quizzes with extra credit<br />
-How to view downloaded quiz submissions in Canvas<br />
-Canvas download quiz submissions with attachments<br />
-Download quiz submissions with formulas in Canvas<br />
-Canvas download quiz submissions for quizzes with random questions<br />
-Download quiz submissions with question banks in Canvas<br />
-Canvas download quiz submissions for quizzes with question groups<br />
-Download quiz submissions with matching questions in Canvas<br />
-Canvas download quiz submissions for quizzes with fill-in-the-blank questions<br />
-Download quiz submissions with essay questions in Canvas<br />
-Canvas download quiz submissions for quizzes with numerical questions<br />
-Download quiz submissions with multiple answers questions in Canvas<br />
-Canvas download quiz submissions for quizzes with true/false questions<br />
-Download quiz submissions with text (no question) in Canvas<br />
-Canvas download quiz submissions for quizzes with file upload questions<br />
-Download quiz submissions with media recordings in Canvas<br />
-Canvas download quiz submissions for quizzes with stimulus questions<br />
-Download quiz submissions with hot spot questions in Canvas</p>
-<h3>Step 2: Save the ZIP file and extract it on your computer</h3>
-<p>After clicking on the link, you will see a pop-up window that asks you to save the ZIP file that contains the quiz submissions. Choose a location on your computer where you want to save the file and click on "Save". Then, locate the file on your computer and extract it using a program such as WinZip or 7-Zip.</p>
-<h3>Step 3: View the quiz submissions by course and student name</h3>
-<p>Once you have extracted the ZIP file, you will see a folder that has the same name as the quiz. Inside this folder, you will find subfolders for each course section and student name. Each subfolder contains a PDF file that shows the quiz submission for that student. You can open these files with a PDF reader such as Adobe Acrobat or Preview. You can also print or share these files as needed.</p>
-<h2>How to Download Quiz Submissions in Canvas New Quizzes</h2>
-<p>The new quiz tool is the updated quiz tool in Canvas. It allows you to create quizzes with more question types and options. You can also view quiz analytics and reports for each quiz.</p>
-<p>To download quiz submissions in Canvas new quizzes, follow these steps:</p>
-<h3>Step 1: Go to the quiz page and click on "Reports"</h3>
-<p>Navigate to your course site and click on "Quizzes" in the left-hand menu. Then, click on the name of the quiz that you want to download submissions for. On the right side of the screen, you will see a button that says "Reports". Click on this button to access the reports page.</p>
-<h3>Step 2: Select the report type and click on "Generate"</h3>
-<p>On the reports page, you will see two options for report types: "Student Analysis" and "Item Analysis". The student analysis report shows the quiz results for each student, including their score, answers, and feedback. The item analysis report shows the quiz results for each question, including its difficulty, discrimination, and distractors. Choose the report type that you want to download and click on "Generate". This will create a CSV file that contains the quiz data.</p>
-<h3>Step 3: Download the CSV file and open it with a spreadsheet program</h3>
-<p>After generating the report, you will see a link that says "Download". Click on this link to download the CSV file to your computer. Then, locate the file on your computer and open it with a spreadsheet program such as Microsoft Excel or Google Sheets. You can view, sort, filter, or analyze the quiz data as needed. You can also print or share this file as needed.</p>
-<h2>How to Download Quiz Submissions in Canvas Assignments</h2>
-<p>Assignments are another type of assessments in Canvas. They can be used to assign tasks, projects, papers, or other types of work to students. Assignments can have different submission types, such as online text entry, file upload, media recording, etc.</p>
-<p>To download quiz submissions in Canvas assignments, follow these steps:</p>
-<h3>Step 1: Go to the assignment page and click on "Download Submissions"</h3>
-<p>Navigate to your course site and click on "Assignments" in the left-hand menu. Then, click on the name of the assignment that you want to download submissions for. On the right side of the screen, you will see a link that says "Download Submissions". Click on this link to start the download process.</p>
-<h3>Step 2: Save the ZIP file and extract it on your computer</h3>
-<p>After clicking on the link, you will see a pop-up window that asks you to save the ZIP file that contains the assignment submissions. Choose a location on your computer where you want to save the file and click on "Save". Then, locate the file on your computer and extract it using a program such as WinZip or 7-Zip.</p>
-<h3>Step 3: View the assignment submissions by course and student name</h3>
-<p>Once you have extracted the ZIP file, you will see a folder that has the same name as the assignment. Inside this folder, you will find subfolders for each course section and student name. Each subfolder contains one or more files that show the assignment submission for that student. You can open these files with an appropriate program depending on their format. For example, if the submission is a Word document, you can open it with Microsoft Word or Google Docs. You can also print or share these files as needed.</p>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download quiz submissions in Canvas using different methods. We have also provided some tips and best practices for downloading quiz submissions effectively. We hope that this article has been helpful and informative for you.</p>
-<p>Downloading quiz submissions from Canvas can be a useful way to review, analyze, or archive the quiz results for your online courses. However, you should also be aware of the limitations and challenges that might arise when downloading quiz submissions. For example, you might not be able to download quiz submissions for certain question types or formats, or with instructor annotations or feedback. You might also not be able to download quiz submissions for group assignments or quizzes that are not graded. Moreover, you might not be able to view or open the downloaded files properly if you do not have the appropriate programs or software.</p>
-<p>Therefore, we recommend that you follow these tips and best practices when downloading quiz submissions from Canvas:</p>
-<ul>
-<li>Check the quiz settings and question types before downloading quiz submissions. Make sure that the quiz allows downloading submissions and that the question types are compatible with the download format.</li>
-<li>Choose the appropriate download method depending on the quiz tool and report type that you want. For legacy quizzes, use the "Download Submissions" link to download PDF files of quiz submissions. For new quizzes, use the "Reports" button to download CSV files of quiz data. For assignments, use the "Download Submissions" link to download ZIP files of assignment submissions.</li>
-<li>Save and extract the downloaded files on your computer and open them with the appropriate programs or software. For PDF files, use a PDF reader such as Adobe Acrobat or Preview. For CSV files, use a spreadsheet program such as Microsoft Excel or Google Sheets. For ZIP files, use a program such as WinZip or 7-Zip.</li>
-<li>View, sort, filter, or analyze the quiz data as needed. You can also print or share the downloaded files as needed.</li>
-</ul>
-<p>If you have any feedback or questions about downloading quiz submissions from Canvas, please feel free to share them with us in the comments section below. We would love to hear from you!</p>
-<h2>FAQs</h2>
-<h3>Can I download quiz submissions for multiple-choice questions only?</h3>
-<p>No, you can download quiz submissions for any question type that is supported by the download format. For legacy quizzes, you can download quiz submissions for any question type except for file upload questions. For new quizzes, you can download quiz data for any question type except for stimulus questions.</p>
-<h3>Can I download quiz submissions with instructor annotations or feedback?</h3>
-<p>No, you cannot download quiz submissions with instructor annotations or feedback. The downloaded files only show the student responses and scores, not the instructor comments or markings.</p>
-<h3>Can I download quiz submissions for group assignments?</h3>
-<p>No, you cannot download quiz submissions for group assignments. The downloaded files only show the individual student submissions and scores, not the group submissions or scores.</p>
-<h3>Can I download quiz submissions for quizzes that are not graded?</h3>
-<p>No, you cannot download quiz submissions for quizzes that are not graded. The downloaded files only show the graded quizzes and scores, not the ungraded quizzes or scores.</p>
-<h3>Can I download quiz submissions for quizzes that have been concluded?</h3>
-<p>Yes, you can download quiz submissions for quizzes that have been concluded. However, you cannot download quiz submissions for quizzes that have been deleted.</p> 401be4b1e0<br />
-<br />
-<br />
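The deleted guide above walks through the Canvas web UI by hand; the same "Student Analysis" CSV can also be fetched programmatically. Below is a minimal, hedged sketch against the Canvas REST Quiz Reports API. The base URL, token, course and quiz IDs are placeholders, and the endpoint shapes follow the publicly documented API; verify them against your own Canvas instance before relying on this.

import time
import requests

BASE_URL = "https://canvas.example.edu/api/v1"    # hypothetical Canvas instance
HEADERS = {"Authorization": "Bearer YOUR_TOKEN"}  # hypothetical API token
COURSE_ID, QUIZ_ID = 1234, 5678                   # hypothetical IDs

# Ask Canvas to build a "student_analysis" report (one CSV row per submission).
report = requests.post(
    f"{BASE_URL}/courses/{COURSE_ID}/quizzes/{QUIZ_ID}/reports",
    headers=HEADERS,
    json={"quiz_report": {"report_type": "student_analysis"}},
).json()

# Poll until the generated CSV is attached to the report, then download it.
while "file" not in report:
    time.sleep(2)
    report = requests.get(
        f"{BASE_URL}/courses/{COURSE_ID}/quizzes/{QUIZ_ID}/reports/{report['id']}",
        headers=HEADERS,
        params={"include[]": "file"},
    ).json()

with open("student_analysis.csv", "wb") as fh:
    fh.write(requests.get(report["file"]["url"], headers=HEADERS).content)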
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
DELETED
@@ -1,553 +0,0 @@
|
|
1 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
from typing import Callable, List, Optional, Union
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import paddle
|
20 |
-
import PIL
|
21 |
-
from packaging import version
|
22 |
-
|
23 |
-
from paddlenlp.transformers import (
|
24 |
-
CLIPTextModel,
|
25 |
-
CLIPTokenizer,
|
26 |
-
DPTForDepthEstimation,
|
27 |
-
DPTImageProcessor,
|
28 |
-
)
|
29 |
-
|
30 |
-
from ...configuration_utils import FrozenDict
|
31 |
-
from ...models import AutoencoderKL, UNet2DConditionModel
|
32 |
-
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
|
33 |
-
from ...schedulers import (
|
34 |
-
DDIMScheduler,
|
35 |
-
DPMSolverMultistepScheduler,
|
36 |
-
EulerAncestralDiscreteScheduler,
|
37 |
-
EulerDiscreteScheduler,
|
38 |
-
LMSDiscreteScheduler,
|
39 |
-
PNDMScheduler,
|
40 |
-
)
|
41 |
-
from ...utils import PIL_INTERPOLATION, deprecate, logging
|
42 |
-
|
43 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
44 |
-
|
45 |
-
|
46 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
|
47 |
-
def preprocess(image):
|
48 |
-
if isinstance(image, paddle.Tensor):
|
49 |
-
return image
|
50 |
-
elif isinstance(image, PIL.Image.Image):
|
51 |
-
image = [image]
|
52 |
-
|
53 |
-
if isinstance(image[0], PIL.Image.Image):
|
54 |
-
w, h = image[0].size
|
55 |
-
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
|
56 |
-
|
57 |
-
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
|
58 |
-
image = np.concatenate(image, axis=0)
|
59 |
-
image = np.array(image).astype(np.float32) / 255.0
|
60 |
-
image = image.transpose(0, 3, 1, 2)
|
61 |
-
image = 2.0 * image - 1.0
|
62 |
-
image = paddle.to_tensor(image)
|
63 |
-
elif isinstance(image[0], paddle.Tensor):
|
64 |
-
image = paddle.concat(image, axis=0)
|
65 |
-
return image
|
66 |
-
|
67 |
-
|
68 |
-
class StableDiffusionDepth2ImgPipeline(DiffusionPipeline):
|
69 |
-
r"""
|
70 |
-
Pipeline for text-guided image to image generation using Stable Diffusion.
|
71 |
-
|
72 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
73 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
74 |
-
|
75 |
-
Args:
|
76 |
-
vae ([`AutoencoderKL`]):
|
77 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
78 |
-
text_encoder ([`CLIPTextModel`]):
|
79 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
80 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
81 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
82 |
-
tokenizer (`CLIPTokenizer`):
|
83 |
-
Tokenizer of class
|
84 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
85 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
86 |
-
scheduler ([`SchedulerMixin`]):
|
87 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
88 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
89 |
-
"""
|
90 |
-
|
91 |
-
def __init__(
|
92 |
-
self,
|
93 |
-
vae: AutoencoderKL,
|
94 |
-
text_encoder: CLIPTextModel,
|
95 |
-
tokenizer: CLIPTokenizer,
|
96 |
-
unet: UNet2DConditionModel,
|
97 |
-
scheduler: Union[
|
98 |
-
DDIMScheduler,
|
99 |
-
PNDMScheduler,
|
100 |
-
LMSDiscreteScheduler,
|
101 |
-
EulerDiscreteScheduler,
|
102 |
-
EulerAncestralDiscreteScheduler,
|
103 |
-
DPMSolverMultistepScheduler,
|
104 |
-
],
|
105 |
-
depth_estimator: DPTForDepthEstimation,
|
106 |
-
feature_extractor: DPTImageProcessor,
|
107 |
-
):
|
108 |
-
super().__init__()
|
109 |
-
|
110 |
-
is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
|
111 |
-
version.parse(unet.config._ppdiffusers_version).base_version
|
112 |
-
) < version.parse("0.9.0.dev0")
|
113 |
-
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
114 |
-
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
115 |
-
deprecation_message = (
|
116 |
-
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
117 |
-
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
|
118 |
-
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
119 |
-
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
120 |
-
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
121 |
-
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
122 |
-
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
123 |
-
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
124 |
-
" the `unet/config.json` file"
|
125 |
-
)
|
126 |
-
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
127 |
-
new_config = dict(unet.config)
|
128 |
-
new_config["sample_size"] = 64
|
129 |
-
unet._internal_dict = FrozenDict(new_config)
|
130 |
-
|
131 |
-
self.register_modules(
|
132 |
-
vae=vae,
|
133 |
-
text_encoder=text_encoder,
|
134 |
-
tokenizer=tokenizer,
|
135 |
-
unet=unet,
|
136 |
-
scheduler=scheduler,
|
137 |
-
depth_estimator=depth_estimator,
|
138 |
-
feature_extractor=feature_extractor,
|
139 |
-
)
|
140 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
141 |
-
|
142 |
-
def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
143 |
-
r"""
|
144 |
-
Encodes the prompt into text encoder hidden states.
|
145 |
-
|
146 |
-
Args:
|
147 |
-
prompt (`str` or `list(int)`):
|
148 |
-
prompt to be encoded
|
149 |
-
num_images_per_prompt (`int`):
|
150 |
-
number of images that should be generated per prompt
|
151 |
-
do_classifier_free_guidance (`bool`):
|
152 |
-
whether to use classifier free guidance or not
|
153 |
-
negative_prompt (`str` or `List[str]`):
|
154 |
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
155 |
-
if `guidance_scale` is less than `1`).
|
156 |
-
"""
|
157 |
-
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
158 |
-
|
159 |
-
text_inputs = self.tokenizer(
|
160 |
-
prompt,
|
161 |
-
padding="max_length",
|
162 |
-
max_length=self.tokenizer.model_max_length,
|
163 |
-
truncation=True,
|
164 |
-
return_tensors="pd",
|
165 |
-
)
|
166 |
-
text_input_ids = text_inputs.input_ids
|
167 |
-
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
|
168 |
-
|
169 |
-
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
|
170 |
-
text_input_ids, untruncated_ids
|
171 |
-
):
|
172 |
-
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
|
173 |
-
logger.warning(
|
174 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
175 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
176 |
-
)
|
177 |
-
|
178 |
-
config = (
|
179 |
-
self.text_encoder.config
|
180 |
-
if isinstance(self.text_encoder.config, dict)
|
181 |
-
else self.text_encoder.config.to_dict()
|
182 |
-
)
|
183 |
-
if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
|
184 |
-
attention_mask = text_inputs.attention_mask
|
185 |
-
else:
|
186 |
-
attention_mask = None
|
187 |
-
|
188 |
-
text_embeddings = self.text_encoder(
|
189 |
-
text_input_ids,
|
190 |
-
attention_mask=attention_mask,
|
191 |
-
)
|
192 |
-
text_embeddings = text_embeddings[0]
|
193 |
-
|
194 |
-
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
195 |
-
bs_embed, seq_len, _ = text_embeddings.shape
|
196 |
-
text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
|
197 |
-
text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
|
198 |
-
|
199 |
-
# get unconditional embeddings for classifier free guidance
|
200 |
-
if do_classifier_free_guidance:
|
201 |
-
uncond_tokens: List[str]
|
202 |
-
if negative_prompt is None:
|
203 |
-
uncond_tokens = [""] * batch_size
|
204 |
-
elif type(prompt) is not type(negative_prompt):
|
205 |
-
raise TypeError(
|
206 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
207 |
-
f" {type(prompt)}."
|
208 |
-
)
|
209 |
-
elif isinstance(negative_prompt, str):
|
210 |
-
uncond_tokens = [negative_prompt]
|
211 |
-
elif batch_size != len(negative_prompt):
|
212 |
-
raise ValueError(
|
213 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
214 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
215 |
-
" the batch size of `prompt`."
|
216 |
-
)
|
217 |
-
else:
|
218 |
-
uncond_tokens = negative_prompt
|
219 |
-
|
220 |
-
max_length = text_input_ids.shape[-1]
|
221 |
-
uncond_input = self.tokenizer(
|
222 |
-
uncond_tokens,
|
223 |
-
padding="max_length",
|
224 |
-
max_length=max_length,
|
225 |
-
truncation=True,
|
226 |
-
return_tensors="pd",
|
227 |
-
)
|
228 |
-
|
229 |
-
if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
|
230 |
-
attention_mask = uncond_input.attention_mask
|
231 |
-
else:
|
232 |
-
attention_mask = None
|
233 |
-
|
234 |
-
uncond_embeddings = self.text_encoder(
|
235 |
-
uncond_input.input_ids,
|
236 |
-
attention_mask=attention_mask,
|
237 |
-
)
|
238 |
-
uncond_embeddings = uncond_embeddings[0]
|
239 |
-
|
240 |
-
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
241 |
-
seq_len = uncond_embeddings.shape[1]
|
242 |
-
uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
|
243 |
-
uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
|
244 |
-
|
245 |
-
# For classifier free guidance, we need to do two forward passes.
|
246 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
247 |
-
# to avoid doing two forward passes
|
248 |
-
text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
|
249 |
-
|
250 |
-
return text_embeddings
|
251 |
-
|
252 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
253 |
-
def run_safety_checker(self, image, dtype):
|
254 |
-
if self.safety_checker is not None:
|
255 |
-
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
|
256 |
-
image, has_nsfw_concept = self.safety_checker(
|
257 |
-
images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
|
258 |
-
)
|
259 |
-
else:
|
260 |
-
has_nsfw_concept = None
|
261 |
-
return image, has_nsfw_concept
|
262 |
-
|
263 |
-
def decode_latents(self, latents):
|
264 |
-
latents = 1 / 0.18215 * latents
|
265 |
-
image = self.vae.decode(latents).sample
|
266 |
-
image = (image / 2 + 0.5).clip(0, 1)
|
267 |
-
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
|
268 |
-
image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
|
269 |
-
return image
|
270 |
-
|
271 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
272 |
-
def prepare_extra_step_kwargs(self, generator, eta):
|
273 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
274 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
275 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
276 |
-
# and should be between [0, 1]
|
277 |
-
|
278 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
279 |
-
extra_step_kwargs = {}
|
280 |
-
if accepts_eta:
|
281 |
-
extra_step_kwargs["eta"] = eta
|
282 |
-
|
283 |
-
# check if the scheduler accepts generator
|
284 |
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
285 |
-
if accepts_generator:
|
286 |
-
extra_step_kwargs["generator"] = generator
|
287 |
-
return extra_step_kwargs
|
288 |
-
|
289 |
-
def check_inputs(self, prompt, strength, callback_steps):
|
290 |
-
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
291 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
292 |
-
|
293 |
-
if strength < 0 or strength > 1:
|
294 |
-
raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}")
|
295 |
-
|
296 |
-
if (callback_steps is None) or (
|
297 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
298 |
-
):
|
299 |
-
raise ValueError(
|
300 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
301 |
-
f" {type(callback_steps)}."
|
302 |
-
)
|
303 |
-
|
304 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
305 |
-
def get_timesteps(self, num_inference_steps, strength):
|
306 |
-
# get the original timestep using init_timestep
|
307 |
-
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
308 |
-
|
309 |
-
t_start = max(num_inference_steps - init_timestep, 0)
|
310 |
-
timesteps = self.scheduler.timesteps[t_start:]
|
311 |
-
|
312 |
-
return timesteps, num_inference_steps - t_start
|
313 |
-
|
314 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
|
315 |
-
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None):
|
316 |
-
image = image.cast(dtype=dtype)
|
317 |
-
|
318 |
-
batch_size = batch_size * num_images_per_prompt
|
319 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
320 |
-
raise ValueError(
|
321 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
322 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
323 |
-
)
|
324 |
-
|
325 |
-
if isinstance(generator, list):
|
326 |
-
init_latents = [
|
327 |
-
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
|
328 |
-
]
|
329 |
-
init_latents = paddle.concat(init_latents, axis=0)
|
330 |
-
else:
|
331 |
-
init_latents = self.vae.encode(image).latent_dist.sample(generator)
|
332 |
-
init_latents = 0.18215 * init_latents
|
333 |
-
|
334 |
-
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
335 |
-
# expand init_latents for batch_size
|
336 |
-
deprecation_message = (
|
337 |
-
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
|
338 |
-
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
339 |
-
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
340 |
-
" your script to pass as many initial images as text prompts to suppress this warning."
|
341 |
-
)
|
342 |
-
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
343 |
-
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
344 |
-
init_latents = paddle.concat([init_latents] * additional_image_per_prompt, axis=0)
|
345 |
-
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
346 |
-
raise ValueError(
|
347 |
-
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
348 |
-
)
|
349 |
-
else:
|
350 |
-
init_latents = paddle.concat([init_latents], axis=0)
|
351 |
-
|
352 |
-
shape = init_latents.shape
|
353 |
-
if isinstance(generator, list):
|
354 |
-
shape = [
|
355 |
-
1,
|
356 |
-
] + shape[1:]
|
357 |
-
noise = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
|
358 |
-
noise = paddle.concat(noise, axis=0)
|
359 |
-
else:
|
360 |
-
noise = paddle.randn(shape, generator=generator, dtype=dtype)
|
361 |
-
|
362 |
-
# get latents
|
363 |
-
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
364 |
-
latents = init_latents
|
365 |
-
|
366 |
-
return latents
|
367 |
-
|
368 |
-
def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_guidance, dtype):
|
369 |
-
if isinstance(image, PIL.Image.Image):
|
370 |
-
image = [image]
|
371 |
-
else:
|
372 |
-
image = [img for img in image]
|
373 |
-
|
374 |
-
if isinstance(image[0], PIL.Image.Image):
|
375 |
-
width, height = image[0].size
|
376 |
-
else:
|
377 |
-
width, height = image[0].shape[-2:]
|
378 |
-
|
379 |
-
if depth_map is None:
|
380 |
-
pixel_values = self.feature_extractor(images=image, return_tensors="pd").pixel_values
|
381 |
-
# The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16.
|
382 |
-
# TODO junnyu, we donot use fp16.
|
383 |
-
depth_map = self.depth_estimator(pixel_values).predicted_depth
|
384 |
-
else:
|
385 |
-
depth_map = depth_map.cast(dtype)
|
386 |
-
|
387 |
-
depth_map = paddle.nn.functional.interpolate(
|
388 |
-
depth_map.unsqueeze(1),
|
389 |
-
size=(height // self.vae_scale_factor, width // self.vae_scale_factor),
|
390 |
-
mode="bicubic",
|
391 |
-
align_corners=False,
|
392 |
-
)
|
393 |
-
|
394 |
-
depth_min = paddle.amin(depth_map, axis=[1, 2, 3], keepdim=True)
|
395 |
-
depth_max = paddle.amax(depth_map, axis=[1, 2, 3], keepdim=True)
|
396 |
-
depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0
|
397 |
-
depth_map = depth_map.cast(dtype)
|
398 |
-
|
399 |
-
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
400 |
-
if depth_map.shape[0] < batch_size:
|
401 |
-
depth_map = depth_map.tile([batch_size, 1, 1, 1])
|
402 |
-
|
403 |
-
depth_map = paddle.concat([depth_map] * 2) if do_classifier_free_guidance else depth_map
|
404 |
-
return depth_map
|
405 |
-
|
406 |
-
@paddle.no_grad()
|
407 |
-
def __call__(
|
408 |
-
self,
|
409 |
-
prompt: Union[str, List[str]],
|
410 |
-
image: Union[paddle.Tensor, PIL.Image.Image],
|
411 |
-
depth_map: Optional[paddle.Tensor] = None,
|
412 |
-
strength: float = 0.8,
|
413 |
-
num_inference_steps: Optional[int] = 50,
|
414 |
-
guidance_scale: Optional[float] = 7.5,
|
415 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
416 |
-
num_images_per_prompt: Optional[int] = 1,
|
417 |
-
eta: Optional[float] = 0.0,
|
418 |
-
generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
|
419 |
-
output_type: Optional[str] = "pil",
|
420 |
-
return_dict: bool = True,
|
421 |
-
callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
|
422 |
-
callback_steps: Optional[int] = 1,
|
423 |
-
):
|
424 |
-
r"""
|
425 |
-
Function invoked when calling the pipeline for generation.
|
426 |
-
|
427 |
-
Args:
|
428 |
-
prompt (`str` or `List[str]`):
|
429 |
-
The prompt or prompts to guide the image generation.
|
430 |
-
image (`paddle.Tensor` or `PIL.Image.Image`):
|
431 |
-
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
432 |
-
process.
|
433 |
-
strength (`float`, *optional*, defaults to 0.8):
|
434 |
-
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
|
435 |
-
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
|
436 |
-
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
|
437 |
-
be maximum and the denoising process will run for the full number of iterations specified in
|
438 |
-
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
439 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
440 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
441 |
-
expense of slower inference. This parameter will be modulated by `strength`.
|
442 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
443 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
444 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
445 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
446 |
-
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`]; will be ignored for others.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.pipeline_utils.ImagePipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
                [`~pipelines.pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
                When returning a tuple, the first element is a list with the generated images.
        """
        # 1. Check inputs
        self.check_inputs(prompt, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Prepare depth mask
        depth_mask = self.prepare_depth_map(
            image,
            depth_map,
            batch_size * num_images_per_prompt,
            do_classifier_free_guidance,
            text_embeddings.dtype,
        )

        # 5. Preprocess image
        image = preprocess(image)

        # 6. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])

        # 7. Prepare latent variables
        latents = self.prepare_latents(
            image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator
        )

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                latent_model_input = paddle.concat([latent_model_input, depth_mask], axis=1)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 10. Post-processing
        image = self.decode_latents(latents)

        # 11. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
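For orientation, here is a minimal sketch of how a depth-conditioned img2img pipeline like the one deleted above is typically driven. The checkpoint name, input image path, and parameter values are illustrative assumptions, not taken from this repository:

# Hypothetical usage sketch for a depth2img pipeline (PaddlePaddle port of diffusers).
# Checkpoint name and file paths are assumptions for illustration only.
from PIL import Image
from ppdiffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")

init_image = Image.open("room.png").convert("RGB")
result = pipe(
    prompt="a sunlit reading nook with wooden shelves",
    image=init_image,
    strength=0.7,            # how far denoising may diverge from the input image
    num_inference_steps=50,
    guidance_scale=7.5,      # > 1.0 enables classifier-free guidance, as in the loop above
)
result.images[0].save("room_depth2img.png")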
spaces/2ndelement/voicevox/voicevox_engine/setting/SettingLoader.py
DELETED
@@ -1,33 +0,0 @@
from pathlib import Path

import yaml

from ..utility import engine_root, get_save_dir
from .Setting import Setting

DEFAULT_SETTING_PATH: Path = engine_root() / "default_setting.yml"
USER_SETTING_PATH: Path = get_save_dir() / "setting.yml"


class SettingLoader:
    def __init__(self, setting_file_path: Path) -> None:
        self.setting_file_path = setting_file_path

    def load_setting_file(self) -> Setting:
        if not self.setting_file_path.is_file():
            setting = yaml.safe_load(DEFAULT_SETTING_PATH.read_text(encoding="utf-8"))
        else:
            setting = yaml.safe_load(self.setting_file_path.read_text(encoding="utf-8"))

        setting = Setting(
            cors_policy_mode=setting["cors_policy_mode"],
            allow_origin=setting["allow_origin"],
        )

        return setting

    def dump_setting_file(self, settings: Setting) -> None:
        settings_dict = settings.dict()

        with open(self.setting_file_path, mode="w", encoding="utf-8") as f:
            yaml.safe_dump(settings_dict, f)
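A minimal usage sketch of this loader follows; it assumes the voicevox_engine package layout shown above is importable and that `Setting` exposes the two fields the loader reads:

# Hypothetical usage sketch; assumes the voicevox_engine package layout above.
from voicevox_engine.setting.SettingLoader import SettingLoader, USER_SETTING_PATH

loader = SettingLoader(USER_SETTING_PATH)

# Falls back to default_setting.yml when the user file does not exist yet.
setting = loader.load_setting_file()
print(setting.cors_policy_mode, setting.allow_origin)

# Persist (possibly modified) settings back to setting.yml.
loader.dump_setting_file(setting)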
spaces/4th3n4/TraDeX/app-plain.py
DELETED
@@ -1,957 +0,0 @@
# %%
# Import section
# (Please don't edit this section unless necessary)
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import glob
import csv
import lightning.pytorch as pl
from lightning.pytorch.callbacks import EarlyStopping, LearningRateMonitor
from lightning.pytorch.loggers import TensorBoardLogger
import torch
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import gc
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
import os
import math
import sys
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Conv1D, LSTM, Dense, Dropout, Bidirectional, TimeDistributed
from tensorflow.keras.layers import MaxPooling1D, Flatten
from tensorflow.keras.regularizers import L1, L2
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.metrics import RootMeanSquaredError
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import KFold
from sklearn.inspection import permutation_importance
from tensorflow.keras.utils import plot_model
from sklearn.metrics import explained_variance_score, mean_poisson_deviance, mean_gamma_deviance, mean_squared_error, mean_squared_log_error, d2_absolute_error_score, d2_pinball_score, d2_tweedie_score
from sklearn.metrics import r2_score
from sklearn.metrics import max_error
import datetime
from datetime import date
import optuna
from tensorflow.keras.callbacks import Callback
from optuna.integration import TFKerasPruningCallback
import shutil
import gradio as gr

# Some variables (don't edit these variables unless necessary)
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)

# Global variables
PATIENCE = 30
MAX_EPOCHS = 3
LEARNING_RATE = 0.01
OPTUNA = True
ACCELERATOR = "cpu"
# This below line is only for GPU. Don't use it for CPU
#os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"

# Variables to count the number of files
w = 7
prax = [0 for x in range(w)]

# %%
# Objective function for Optuna (CNN-LSTM)
def objective(trial, X_train, y_train, X_test, y_test):
    model = tf.keras.Sequential()

    # Creating the Neural Network model here...
    # CNN layers
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
    # model.add(Dense(5, kernel_regularizer=L2(0.01)))

    # LSTM layers
    model.add(Bidirectional(LSTM(trial.suggest_int("lstm_units_1", 32, 256), return_sequences=True)))
    model.add(Dropout(trial.suggest_float("dropout_1", 0.1, 0.5)))
    model.add(Bidirectional(LSTM(trial.suggest_int("lstm_units_2", 32, 256), return_sequences=False)))
    model.add(Dropout(trial.suggest_float("dropout_2", 0.1, 0.5)))

    # Final layers
    model.add(Dense(1, activation='relu'))
    model.compile(optimizer='adam', loss='mse', metrics=['mse'])

    # Train the model
    pruning_callback = TFKerasPruningCallback(trial, "val_loss")
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=32, verbose=0, callbacks=[pruning_callback])

    # Evaluate the model
    loss = model.evaluate(X_test, y_test, verbose=0)[0]

    return loss

# %%
# Function to train the model (CNN-LSTM)
def modelCNNLSTM(csv_file, prax):
    # Read the data
    df = csv_file
    #df = df['Date/Time'].values.astype("float64")
    temp_data = df.iloc[0:len(df)-100, 1:21]
    trek = df.iloc[len(df)-100:, 1:21]
    #print(temp_data)
    data = temp_data
    sc = MinMaxScaler()
    # Split the data into training and testing sets
    train_size = int(len(data) * 0.8)
    train_data, test_data = data[:train_size], data[train_size:]
    # Separate the input features and target variable
    X_train, y_train = train_data, train_data['Close']
    X_test, y_test = test_data, test_data['Close']

    X_train = X_train[0:len(X_train)-1]
    y_train = y_train[1:len(y_train)]
    X_test = X_test[0:len(X_test)-1]
    y_test = y_test[1:len(y_test)]

    Xt = X_train
    Xts = X_test
    Yt = y_train
    Yts = y_test

    y_train = y_train.values.reshape(-1, 1)
    y_test = y_test.values.reshape(-1, 1)

    X_train = sc.fit_transform(X_train)
    y_train = sc.fit_transform(y_train)
    X_test = sc.fit_transform(X_test)
    y_test = sc.fit_transform(y_test)

    x_tr = pd.DataFrame(X_train, index=Xt.index, columns=Xt.columns)
    y_tr = pd.DataFrame(y_train, index=Yt.index)
    x_te = pd.DataFrame(X_test, index=Xts.index, columns=Xts.columns)
    y_te = pd.DataFrame(y_test, index=Yts.index)

    # Reshape the data for the CNN-LSTM model
    X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))

    study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner(n_min_trials=4, n_startup_trials=4))
    fn = lambda trial: objective(trial, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
    study.optimize(fn, n_trials=5)

    best_params = study.best_params
    #print(f"Best params: {best_params}")

    model = tf.keras.Sequential()

    # Creating the Neural Network model here...
    # CNN layers
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
    # model.add(Dense(5, kernel_regularizer=L2(0.01)))

    # LSTM layers
    model.add(Bidirectional(LSTM(best_params["lstm_units_1"], return_sequences=True)))
    model.add(Dropout(best_params["dropout_1"]))
    model.add(Bidirectional(LSTM(best_params["lstm_units_2"], return_sequences=False)))
    model.add(Dropout(best_params["dropout_2"]))

    # Final layers
    model.add(Dense(1, activation='relu'))
    model.compile(optimizer='adam', loss='mse', metrics=['mse'])

    # Train the model
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32, verbose=0)

    # Evaluate the model
    loss = model.evaluate(X_test, y_test, verbose=0)[0]

    print(f"Final loss (without KFold): {loss}")

    kfold = KFold(n_splits=10, shuffle=True)

    inputs = np.concatenate((X_train, X_test), axis=0)
    targets = np.concatenate((y_train, y_test), axis=0)
    acc_per_fold = []
    loss_per_fold = []
    xgb_res = []
    num_epochs = 10
    batch_size = 32

    fold_no = 1
    print('------------------------------------------------------------------------')
    print("Training for 10 folds... Standby")
    for train, test in kfold.split(inputs, targets):
        #print(f'Training for fold {fold_no} ...')
        history = model.fit(inputs[train], targets[train],
                            batch_size=32,
                            epochs=15,
                            verbose=0)

        scores = model.evaluate(inputs[test], targets[test], verbose=0)
        #print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
        acc_per_fold.append(scores[1] * 100)
        loss_per_fold.append(scores[0])
        fold_no = fold_no + 1

    print('------------------------------------------------------------------------')
    # (Commented-out per-fold and average score reporting from the notebook omitted here.)

    trek = df.iloc[0:len(df), 1:21]
    Y = trek[0:len(trek)]
    YP = trek[1:len(trek)]
    Y1 = Y['Close']
    Y2 = YP['Close']
    Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
    #X = sc.fit_transform(X.reshape(-1,22))
    Y = np.array(Y)
    Y1 = np.array(Y1)
    Y = sc.fit_transform(Y)
    Y1 = Y1.reshape(-1, 1)
    Y1 = sc.fit_transform(Y1)

    train_X = Y.reshape(Y.shape[0], Y.shape[1], 1)
    #Y = Y.reshape(-1,1)
    pred = model.predict(train_X, verbose=0)
    pred = np.array(pred).reshape(-1, 1)
    var2 = max_error(pred.reshape(-1, 1), Y1)
    print('Max Error: %f' % var2)
    prax[5] = float(var2)
    pred = sc.inverse_transform(pred)

    print(pred[-2], pred[-1])
    prax[3] = pred[-2]
    prax[4] = pred[-1]
    if (pred[-1] - pred[-2] > 0):
        prax[6] = 1
    elif (pred[-1] - pred[-2] == 0):
        prax[6] = 0
    else:
        prax[6] = -1

# %%
# Function to train the model (CNN-LSTM, with open-gap feature)
def modelCNNLSTM_OpenGap(csv_file, prax):
    # Read the data
    df = csv_file
    datLength = len(df)
    df['O-C'] = 0
    for i in range(datLength):
        if i == 0:
            df['O-C'][i] = 0
            continue
        else:
            df['O-C'][i] = df['Open'][i] - df['Close'][i-1]
    temp_data = df.iloc[0:datLength-100, 1:22]
    trek = df.iloc[datLength-100:, 1:22]
    #print(temp_data)
    data = temp_data
    #data = data.values.astype("float64")
    sc = MinMaxScaler()
    # Split the data into training and testing sets
    train_size = int(len(data) * 0.8)
    train_data, test_data = data[:train_size], data[train_size:]

    # Separate the input features and target variable
    X_train, y_train = train_data, train_data['Close']
    X_test, y_test = test_data, test_data['Close']

    X_train = X_train[0:len(X_train)-1]
    y_train = y_train[1:len(y_train)]
    X_test = X_test[0:len(X_test)-1]
    y_test = y_test[1:len(y_test)]

    Xt = X_train
    Xts = X_test
    Yt = y_train
    Yts = y_test

    y_train = y_train.values.reshape(-1, 1)
    y_test = y_test.values.reshape(-1, 1)

    X_train = sc.fit_transform(X_train)
    y_train = sc.fit_transform(y_train)
    X_test = sc.fit_transform(X_test)
    y_test = sc.fit_transform(y_test)

    x_tr = pd.DataFrame(X_train, index=Xt.index, columns=Xt.columns)
    y_tr = pd.DataFrame(y_train, index=Yt.index)
    x_te = pd.DataFrame(X_test, index=Xts.index, columns=Xts.columns)
    y_te = pd.DataFrame(y_test, index=Yts.index)

    # Reshape the data for the CNN-LSTM model
    X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))

    study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner(n_min_trials=2, n_startup_trials=2))
    fn = lambda trial: objective(trial, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
    study.optimize(fn, n_trials=5)

    best_params = study.best_params
    #print(f"Best params: {best_params}")

    model = tf.keras.Sequential()

    # Creating the Neural Network model here...
    # CNN layers
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
    # model.add(Dense(5, kernel_regularizer=L2(0.01)))

    # LSTM layers
    model.add(Bidirectional(LSTM(best_params["lstm_units_1"], return_sequences=True)))
    model.add(Dropout(best_params["dropout_1"]))
    model.add(Bidirectional(LSTM(best_params["lstm_units_2"], return_sequences=False)))
    model.add(Dropout(best_params["dropout_2"]))

    # Final layers
    model.add(Dense(1, activation='relu'))
    model.compile(optimizer='adam', loss='mse', metrics=['mse'])

    # Train the model
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32, verbose=0)

    # Evaluate the model
    loss = model.evaluate(X_test, y_test, verbose=0)[0]

    print(f"Final loss (without KFold): {loss}")

    kfold = KFold(n_splits=10, shuffle=True)

    inputs = np.concatenate((X_train, X_test), axis=0)
    targets = np.concatenate((y_train, y_test), axis=0)
    acc_per_fold = []
    loss_per_fold = []
    xgb_res = []
    num_epochs = 10
    batch_size = 32

    fold_no = 1
    print('------------------------------------------------------------------------')
    print("Training for 10 folds... Standby")
    for train, test in kfold.split(inputs, targets):
        #print(f'Training for fold {fold_no} ...')
        history = model.fit(inputs[train], targets[train],
                            batch_size=32,
                            epochs=15,
                            verbose=0)

        scores = model.evaluate(inputs[test], targets[test], verbose=0)
        #print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
        acc_per_fold.append(scores[1] * 100)
        loss_per_fold.append(scores[0])
        fold_no = fold_no + 1

    print('------------------------------------------------------------------------')
    # (Commented-out per-fold and average score reporting from the notebook omitted here.)

    trek = df.iloc[0:len(df), 1:22]
    Y = trek[0:len(trek)]
    YP = trek[1:len(trek)]
    Y1 = Y['Close']
    Y2 = YP['Close']
    Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
    #X = sc.fit_transform(X.reshape(-1,22))
    Y = np.array(Y)
    Y1 = np.array(Y1)
    Y = sc.fit_transform(Y)
    Y1 = Y1.reshape(-1, 1)
    Y1 = sc.fit_transform(Y1)

    train_X = Y.reshape(Y.shape[0], Y.shape[1], 1)
    #Y = Y.reshape(-1,1)
    pred = model.predict(train_X, verbose=0)
    pred = np.array(pred).reshape(-1, 1)
    var2 = max_error(pred.reshape(-1, 1), Y1)
    print('Max Error: %f' % var2)
    prax[5] = float(var2)
    pred = sc.inverse_transform(pred)

    print(pred[-2], pred[-1])
    prax[3] = pred[-2]
    prax[4] = pred[-1]
    if (pred[-1] - pred[-2] > 0):
        prax[6] = 1
    elif (pred[-1] - pred[-2] == 0):
        prax[6] = 0
    else:
        prax[6] = -1

# %%
# Function to train the model (TFT)
def modelTFT(csv_file, prax):
    train = csv_file
    #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
    train['date'] = pd.to_datetime(train['Date/Time'])
    #test['date'] = pd.to_datetime(test['Date'])

    data = pd.concat([train], axis=0, ignore_index=True)
    # Check that key is country-store-product-date combination
    #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
    # Check that there is one date per country-store-product combination
    #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()

    #display(train.sample(4))

    """<a id ="3"></a><h3 style="background:#0554f2; border:0; border-radius: 4px; color:#f5f6f7">Model Implementation in Pytorch-Forecasting </h3>"""

    # Add a time_idx (a sequence of consecutive integers that goes from min to max date)

    data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
                        .rename_axis('time_idx')).reset_index(), on=['Date/Time']))
    # add additional features
    data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category")  # categories have to be strings
    data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category")  # categories have to be strings
    data["month"] = data['date'].dt.month.astype(str).astype("category")  # categories have to be strings
    # (Commented-out notebook feature engineering omitted here: log target, average volume
    #  by country/store/product, and holiday lead/lag indicators merged per date/Ticker.)
    gc.collect()
    data.sample(5, random_state=30)

    train = data.iloc[:len(train)]
    test = data.iloc[len(train):]

    max_prediction_length = 2
    max_encoder_length = train.date.nunique()
    training_cutoff = train["time_idx"].max() - max_prediction_length  # we will validate on 2020

    # Let's create a Dataset
    training = TimeSeriesDataSet(
        train[lambda x: x.time_idx <= training_cutoff],
        time_idx="time_idx",
        target="Close",
        group_ids=["Ticker"],
        min_encoder_length=max_prediction_length,  # keep encoder length long (as it is in the validation set)
        max_encoder_length=max_encoder_length,
        max_prediction_length=max_prediction_length,
        static_categoricals=["Ticker"],
        time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
        #variable_groups={"is_holiday": ["is_holiday"]},  # group of categorical variables can be treated as one variable
        time_varying_known_reals=["time_idx"],
        time_varying_unknown_categoricals=[],
        time_varying_unknown_reals=[
            'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','Ema5','Ema20','Ema50','Ema200'
        ],
        target_normalizer=GroupNormalizer(
            groups=['Ticker'], transformation="softplus"
        ),  # use softplus and normalize by group
        categorical_encoders={
            'week_of_year': NaNLabelEncoder(add_nan=True)
        },
        #lags={'num_sold': [7, 30, 365]},
        add_relative_time_idx=True,
        add_target_scales=True,
        add_encoder_length=True,
    )

    # create validation set (predict=True) which means to predict the last max_prediction_length points in time
    # for each series
    validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)

    # create dataloaders for model
    batch_size = 128  # set this between 32 and 128
    train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
    val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)

    # let's see how a naive model does

    actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])  #.cuda()
    baseline_predictions = Baseline().predict(val_dataloader)  #.cuda()
    (actuals - baseline_predictions).abs().mean().item()

    sm = SMAPE()

    print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")

    early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
    lr_logger = LearningRateMonitor()  # log the learning rate
    logger = TensorBoardLogger("lightning_logs")  # logging results to a tensorboard

    trainer = pl.Trainer(
        max_epochs=1,
        accelerator=ACCELERATOR,
        enable_model_summary=False,
        gradient_clip_val=0.25,
        limit_train_batches=10,  # comment in for training, runs validation every 30 batches
        #fast_dev_run=True,  # comment in to check that network or dataset has no serious bugs
        callbacks=[lr_logger, early_stop_callback],
        logger=logger,
    )

    tft = TemporalFusionTransformer.from_dataset(
        training,
        learning_rate=LEARNING_RATE,
        lstm_layers=2,
        hidden_size=16,
        attention_head_size=2,
        dropout=0.2,
        hidden_continuous_size=8,
        output_size=1,  # 7 quantiles by default
        loss=SMAPE(),
        log_interval=10,  # uncomment for learning rate finder, e.g. set to 10 for logging every 10 batches
        reduce_on_plateau_patience=4
    )

    tft.to(DEVICE)
    trainer.fit(
        tft,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    #torch.cuda.empty_cache()
    #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")

    if OPTUNA:
        from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters

        # create study
        study = optimize_hyperparameters(
            train_dataloader,
            val_dataloader,
            model_path="optuna_test",
            n_trials=5,
            max_epochs=MAX_EPOCHS,
            gradient_clip_val_range=(0.01, 0.3),
            hidden_size_range=(8, 24),
            hidden_continuous_size_range=(8, 12),
            attention_head_size_range=(2, 4),
            learning_rate_range=(0.01, 0.05),
            dropout_range=(0.1, 0.25),
            trainer_kwargs=dict(limit_train_batches=20),
            reduce_on_plateau_patience=4,
            pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_startup_trials=3),
            use_learning_rate_finder=False,  # use Optuna to find ideal learning rate, or use built-in learning rate finder
        )
        #torch.cuda.empty_cache()
        trainer = pl.Trainer(
            max_epochs=MAX_EPOCHS,
            accelerator=ACCELERATOR,
            enable_model_summary=False,
            gradient_clip_val=study.best_params['gradient_clip_val'],
            limit_train_batches=20,  # comment in for training, runs validation every 30 batches
            #fast_dev_run=True,  # comment in to check that network or dataset has no serious bugs
            callbacks=[lr_logger, early_stop_callback],
            logger=logger,
        )

        tft = TemporalFusionTransformer.from_dataset(
            training,
            learning_rate=study.best_params['learning_rate'],
            lstm_layers=2,
            hidden_size=study.best_params['hidden_size'],
            attention_head_size=study.best_params['attention_head_size'],
            dropout=study.best_params['dropout'],
            hidden_continuous_size=study.best_params['hidden_continuous_size'],
            output_size=1,  # 7 quantiles by default
            loss=SMAPE(),
            log_interval=10,
            reduce_on_plateau_patience=4
        )

        tft.to(DEVICE)
        trainer.fit(
            tft,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )
        #torch.cuda.empty_cache()
    best_model_path = trainer.checkpoint_callback.best_model_path
    best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])  #.cuda()
    predictions = best_tft.predict(val_dataloader, mode="prediction")
    raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)

    sm = SMAPE()
    print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions).mean(axis = 1).median().item()}")
    prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
    #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);

    print(raw_predictions[0][0])
    prax[3] = '-'
    prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
    t = prax[4]
    tm = data['Close'][len(data)-1]
    if (t - tm > 0):
        prax[6] = 1
    elif (t - tm == 0):
        prax[6] = 0
    else:
        prax[6] = -1
    #prax[i][3] = raw_predictions[0][0].data[1]
    print("-----------")

    #with open("out.csv", "w", newline="") as f:
    #    writer = csv.writer(f)
    #    writer.writerows(prax)

# %%
# Function to train the model (TFT, with open-gap feature)
def modelTFT_OpenGap(csv_file, prax):
    train = csv_file
    #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
    train['date'] = pd.to_datetime(train['Date/Time'])
    #test['date'] = pd.to_datetime(test['Date'])
    datLength = len(train)
    train['O-C'] = 0
    for i in range(datLength):
        if i == 0:
            train['O-C'][i] = 0
            continue
        else:
            train['O-C'][i] = train['Open'][i] - train['Close'][i-1]
    data = pd.concat([train], axis=0, ignore_index=True)
    # Check that key is country-store-product-date combination
    #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
    # Check that there is one date per country-store-product combination
    #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()

    #display(train.sample(4))

    """<a id ="3"></a><h3 style="background:#0554f2; border:0; border-radius: 4px; color:#f5f6f7">Model Implementation in Pytorch-Forecasting </h3>"""

    # Add a time_idx (a sequence of consecutive integers that goes from min to max date)

    data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
                        .rename_axis('time_idx')).reset_index(), on=['Date/Time']))
    # add additional features
    data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category")  # categories have to be strings
    data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category")  # categories have to be strings
    data["month"] = data['date'].dt.month.astype(str).astype("category")  # categories have to be strings
    # (Commented-out notebook feature engineering omitted here: log target, average volume
    #  by country/store/product, and holiday lead/lag indicators merged per date/Ticker.)
    gc.collect()
    data.sample(5, random_state=30)

    train = data.iloc[:len(train)]
    test = data.iloc[len(train):]

    max_prediction_length = 2
    max_encoder_length = train.date.nunique()
    training_cutoff = train["time_idx"].max() - max_prediction_length  # we will validate on 2020

    # Let's create a Dataset
    training = TimeSeriesDataSet(
        train[lambda x: x.time_idx <= training_cutoff],
        time_idx="time_idx",
        target="Close",
        group_ids=["Ticker"],
        min_encoder_length=max_prediction_length,  # keep encoder length long (as it is in the validation set)
        max_encoder_length=max_encoder_length,
        max_prediction_length=max_prediction_length,
        static_categoricals=["Ticker"],
        time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
        #variable_groups={"is_holiday": ["is_holiday"]},  # group of categorical variables can be treated as one variable
        time_varying_known_reals=["time_idx"],
        time_varying_unknown_categoricals=[],
        time_varying_unknown_reals=[
            'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','Ema5','Ema20','Ema50','Ema200', 'O-C'
        ],
        target_normalizer=GroupNormalizer(
            groups=['Ticker'], transformation="softplus"
        ),  # use softplus and normalize by group
        categorical_encoders={
            'week_of_year': NaNLabelEncoder(add_nan=True)
        },
        #lags={'num_sold': [7, 30, 365]},
        add_relative_time_idx=True,
        add_target_scales=True,
        add_encoder_length=True,
    )

    # create validation set (predict=True) which means to predict the last max_prediction_length points in time
    # for each series
    validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)

    # create dataloaders for model
    batch_size = 128  # set this between 32 and 128
    train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
    val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)

    # let's see how a naive model does

    actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])  #.cuda()
    baseline_predictions = Baseline().predict(val_dataloader)  #.cuda()
    (actuals - baseline_predictions).abs().mean().item()

    sm = SMAPE()

    print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")

    early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
    lr_logger = LearningRateMonitor()  # log the learning rate
    logger = TensorBoardLogger("lightning_logs")  # logging results to a tensorboard

    trainer = pl.Trainer(
        max_epochs=1,
        accelerator=ACCELERATOR,
        enable_model_summary=False,
        gradient_clip_val=0.25,
        limit_train_batches=10,  # comment in for training, runs validation every 30 batches
        #fast_dev_run=True,  # comment in to check that network or dataset has no serious bugs
        callbacks=[lr_logger, early_stop_callback],
        logger=logger,
    )

    tft = TemporalFusionTransformer.from_dataset(
        training,
        learning_rate=LEARNING_RATE,
        lstm_layers=2,
        hidden_size=16,
        attention_head_size=2,
        dropout=0.2,
        hidden_continuous_size=8,
        output_size=1,  # 7 quantiles by default
        loss=SMAPE(),
        log_interval=10,
        reduce_on_plateau_patience=4
    )

    tft.to(DEVICE)
    trainer.fit(
        tft,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
    )
    #torch.cuda.empty_cache()
    #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")

    if OPTUNA:
        from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters

        # create study
        study = optimize_hyperparameters(
            train_dataloader,
            val_dataloader,
            model_path="optuna_test",
            n_trials=5,
            max_epochs=MAX_EPOCHS,
            gradient_clip_val_range=(0.01, 0.3),
            hidden_size_range=(8, 24),
            hidden_continuous_size_range=(8, 12),
            attention_head_size_range=(2, 4),
            learning_rate_range=(0.01, 0.05),
            dropout_range=(0.1, 0.25),
            trainer_kwargs=dict(limit_train_batches=20),
            reduce_on_plateau_patience=4,
            pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_warmup_steps=3),
            use_learning_rate_finder=False,  # use Optuna to find ideal learning rate, or use built-in learning rate finder
        )
        #torch.cuda.empty_cache()
        trainer = pl.Trainer(
            max_epochs=MAX_EPOCHS,
            accelerator=ACCELERATOR,
            enable_model_summary=False,
            gradient_clip_val=study.best_params['gradient_clip_val'],
            limit_train_batches=20,  # comment in for training, runs validation every 30 batches
            #fast_dev_run=True,  # comment in to check that network or dataset has no serious bugs
            callbacks=[lr_logger, early_stop_callback],
            logger=logger,
        )

        tft = TemporalFusionTransformer.from_dataset(
            training,
            learning_rate=study.best_params['learning_rate'],
            lstm_layers=2,
            hidden_size=study.best_params['hidden_size'],
            attention_head_size=study.best_params['attention_head_size'],
            dropout=study.best_params['dropout'],
            hidden_continuous_size=study.best_params['hidden_continuous_size'],
            output_size=1,  # 7 quantiles by default
            loss=SMAPE(),
            log_interval=10,
            reduce_on_plateau_patience=4
        )

        tft.to(DEVICE)
        trainer.fit(
            tft,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )
        #torch.cuda.empty_cache()
    best_model_path = trainer.checkpoint_callback.best_model_path
    best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])  #.cuda()
    predictions = best_tft.predict(val_dataloader, mode="prediction")
    raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)

    sm = SMAPE()
    print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions).mean(axis = 1).median().item()}")
    prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
    #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);

    print(raw_predictions[0][0])
    prax[3] = '-'
    prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
    t = prax[4]
    tm = data['Close'][len(data)-1]
    if (t - tm > 0):
        prax[6] = 1
    elif (t - tm == 0):
        prax[6] = 0
    else:
        prax[6] = -1
    #prax[i][3] = raw_predictions[0][0].data[1]
    print("-----------")

    #with open("out.csv", "w", newline="") as f:
    #    writer = csv.writer(f)
    #    writer.writerows(prax)

# %%
def generate_csv(data_list):
    today = date.today().strftime("%Y_%m_%d")
    filename = f"result_{today}.csv"
    file_exists = os.path.isfile(filename)
    with open(filename, mode='a', newline='') as csv_file:
        fieldnames = ['Ticker', 'Prev_Close_Real', 'Model', 'Prev_Close_Model', 'Close_Model', 'Max_Err', 'Up_Down']  # replace with your own column names
        writer = csv.writer(csv_file, delimiter=',')
        if not file_exists:
            writer.writerow(fieldnames)  # file doesn't exist yet, write a header
        writer.writerow(data_list)
    csv_file.close()

def guess_date(string):
    for fmt in ["%Y/%m/%d", "%d-%m-%Y", "%Y%m%d", "%m/%d/%Y", "%d/%m/%Y", "%Y-%m-%d", "%d/%m/%y", "%m/%d/%y"]:
        try:
            return datetime.datetime.strptime(string, fmt).date()
        except ValueError:
            continue
    raise ValueError(string)

# %%
# Main function
def main():
    # Data loading
    path = "./tmp"
    print("Searching CSV files in ", path, "...")
    # path = "/kaggle/input/artemis-test"

    # Get a list of all the CSV files in the folder
    csv_files = glob.glob(path + "/*.csv")
    prax = [0, 0, 0, 0, 0, 0, 0]
    # Create a list of DataFrames, one for each CSV file
    dfs = []
    c = 0
    for csv_file in csv_files:
        df = pd.read_csv(csv_file)
        dfs.append(df)
        c = c + 1

    if c == 0:
        print("No CSV files found in ", path, ".")
        print("Exiting...")

    for df in dfs:
        #print(df.head())
        print(df['Ticker'][0])
        prax[0] = df['Ticker'][0]
        prax[1] = df['Close'][len(df)-1]
        print('------------------')
        #df = df.drop(['Volume'], axis=1)
        for i in range(len(df)):
            x = guess_date(df['Date/Time'][i])
            df['Date/Time'][i] = x.strftime("%Y-%m-%d")
        df['Date/Time'] = pd.to_datetime(df['Date/Time'])
        df.fillna(0, inplace=True)
        modelTFT(df, prax)
        prax[2] = "TFT"
        generate_csv(prax)
        prax = [0, 0, 0, 0, 0, 0, 0]
        modelTFT_OpenGap(df, prax)
        prax[2] = "TFT_OpenGap"
        generate_csv(prax)
        #df.set_index('Date/Time', inplace=True)
        df = df.drop(['Date/Time'], axis=1)
        prax = [0, 0, 0, 0, 0, 0, 0]
        modelCNNLSTM(df, prax)
        prax[2] = "CNNLSTM"
        generate_csv(prax)
        prax = [0, 0, 0, 0, 0, 0, 0]
        modelCNNLSTM_OpenGap(df, prax)
        prax[2] = "CNNLSTM_OpenGap"
        #print("Saving to CSV... Standby...")
        generate_csv(prax)
        # Generate a blank separator line
        prax = ["", "", "", "", "", "", ""]
        generate_csv(prax)
        # Reset prax
        prax = [0, 0, 0, 0, 0, 0, 0]

if __name__ == "__main__":
    main()

    print("Deleting temporary log files...")
    # Delete "lightning_logs" directory
    if os.path.exists("lightning_logs"):
        shutil.rmtree("lightning_logs")
    # Delete "optuna_test" directory
    if os.path.exists("optuna_test"):
        shutil.rmtree("optuna_test")
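The CNN-LSTM functions above frame forecasting as one-step-ahead supervised learning by pairing the features at day t with the close at day t+1 via the off-by-one slices. A self-contained sketch of that alignment, with fabricated numbers, may make the slicing easier to follow:

# Minimal illustration of the X[t] -> y[t+1] alignment used by modelCNNLSTM above.
# The tiny DataFrame is fabricated for demonstration.
import pandas as pd

df = pd.DataFrame({"Close": [100.0, 101.5, 99.8, 102.3, 103.1]})

X = df.iloc[0:len(df) - 1]          # features for days 0..3
y = df["Close"].iloc[1:len(df)]     # targets are the next day's close, days 1..4

for (i, xrow), target in zip(X.iterrows(), y):
    print(f"day {i}: close={xrow['Close']:.1f} -> next close={target:.1f}")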
spaces/7hao/bingo/src/components/ui/dropdown-menu.tsx
DELETED
@@ -1,128 +0,0 @@
'use client'

import * as React from 'react'
import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu'

import { cn } from '@/lib/utils'

const DropdownMenu = DropdownMenuPrimitive.Root

const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger

const DropdownMenuGroup = DropdownMenuPrimitive.Group

const DropdownMenuPortal = DropdownMenuPrimitive.Portal

const DropdownMenuSub = DropdownMenuPrimitive.Sub

const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup

const DropdownMenuSubContent = React.forwardRef<
  React.ElementRef<typeof DropdownMenuPrimitive.SubContent>,
  React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent>
>(({ className, ...props }, ref) => (
  <DropdownMenuPrimitive.SubContent
    ref={ref}
    className={cn(
      'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-md animate-in data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
      className
    )}
    {...props}
  />
))
DropdownMenuSubContent.displayName =
  DropdownMenuPrimitive.SubContent.displayName

const DropdownMenuContent = React.forwardRef<
  React.ElementRef<typeof DropdownMenuPrimitive.Content>,
  React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
>(({ className, sideOffset = 4, ...props }, ref) => (
  <DropdownMenuPrimitive.Portal>
    <DropdownMenuPrimitive.Content
      ref={ref}
      sideOffset={sideOffset}
      className={cn(
        'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow animate-in data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2',
        className
      )}
      {...props}
    />
  </DropdownMenuPrimitive.Portal>
))
DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName

const DropdownMenuItem = React.forwardRef<
  React.ElementRef<typeof DropdownMenuPrimitive.Item>,
  React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & {
    inset?: boolean
  }
>(({ className, inset, ...props }, ref) => (
  <DropdownMenuPrimitive.Item
    ref={ref}
    className={cn(
      'relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50',
      inset && 'pl-8',
      className
    )}
    {...props}
  />
))
DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName

const DropdownMenuLabel = React.forwardRef<
  React.ElementRef<typeof DropdownMenuPrimitive.Label>,
  React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & {
    inset?: boolean
  }
>(({ className, inset, ...props }, ref) => (
  <DropdownMenuPrimitive.Label
    ref={ref}
    className={cn(
      'px-2 py-1.5 text-sm font-semibold',
      inset && 'pl-8',
      className
    )}
    {...props}
  />
))
DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName

const DropdownMenuSeparator = React.forwardRef<
  React.ElementRef<typeof DropdownMenuPrimitive.Separator>,
  React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator>
>(({ className, ...props }, ref) => (
  <DropdownMenuPrimitive.Separator
    ref={ref}
    className={cn('-mx-1 my-1 h-px bg-muted', className)}
    {...props}
  />
))
DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName

const DropdownMenuShortcut = ({
  className,
  ...props
}: React.HTMLAttributes<HTMLSpanElement>) => {
  return (
    <span
      className={cn('ml-auto text-xs tracking-widest opacity-60', className)}
      {...props}
    />
  )
}
DropdownMenuShortcut.displayName = 'DropdownMenuShortcut'

export {
  DropdownMenu,
  DropdownMenuTrigger,
  DropdownMenuContent,
  DropdownMenuItem,
  DropdownMenuLabel,
  DropdownMenuSeparator,
  DropdownMenuShortcut,
  DropdownMenuGroup,
  DropdownMenuPortal,
  DropdownMenuSub,
  DropdownMenuSubContent,
  DropdownMenuRadioGroup
}
spaces/AIFILMS/StyleGANEX/datasets/__init__.py
DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/align.py
DELETED
@@ -1,115 +0,0 @@
import re

import torch
import numpy as np
from textgrid import TextGrid

from text_to_speech.utils.text.text_encoder import is_sil_phoneme


def get_mel2ph(tg_fn, ph, mel, hop_size, audio_sample_rate, min_sil_duration=0):
    ph_list = ph.split(" ")
    itvs = TextGrid.fromFile(tg_fn)[1]
    itvs_ = []
    for i in range(len(itvs)):
        if itvs[i].maxTime - itvs[i].minTime < min_sil_duration and i > 0 and is_sil_phoneme(itvs[i].mark):
            itvs_[-1].maxTime = itvs[i].maxTime
        else:
            itvs_.append(itvs[i])
    itvs.intervals = itvs_
    itv_marks = [itv.mark for itv in itvs]
    tg_len = len([x for x in itvs if not is_sil_phoneme(x.mark)])
    ph_len = len([x for x in ph_list if not is_sil_phoneme(x)])
    assert tg_len == ph_len, (tg_len, ph_len, itv_marks, ph_list, tg_fn)
    mel2ph = np.zeros([mel.shape[0]], int)

    # to process aishell3_no_tone
    # for _ in range(10):

    #     if itvs[-2].mark == '':
    #         start_time = itvs[-2].minTime
    #         end_time = itvs[-1].maxTime
    #         mark = itvs[-1].mark
    #         itvs[-2].maxTime = end_time
    #         itvs[-2].mark = mark
    #         itvs_ = []
    #         for i in range(len(itvs)-1):
    #             itvs_.append(itvs[i])
    #         itvs.intervals = itvs_

    #     if itvs[-1].mark == '':
    #         start_time = itvs[-2].minTime
    #         end_time = itvs[-1].maxTime
    #         itvs[-2].maxTime = end_time
    #         itvs_ = []
    #         for i in range(len(itvs)-1):
    #             itvs_.append(itvs[i])
    #         itvs.intervals = itvs_

    i_itv = 0
    i_ph = 0
    while i_itv < len(itvs):
        itv = itvs[i_itv]
        ph = ph_list[i_ph]
        itv_ph = itv.mark
        start_frame = int(itv.minTime * audio_sample_rate / hop_size + 0.5)
        end_frame = int(itv.maxTime * audio_sample_rate / hop_size + 0.5)
        if is_sil_phoneme(itv_ph) and not is_sil_phoneme(ph):
            mel2ph[start_frame:end_frame] = i_ph
            i_itv += 1
        elif not is_sil_phoneme(itv_ph) and is_sil_phoneme(ph):
            i_ph += 1
        else:
            if not ((is_sil_phoneme(itv_ph) and is_sil_phoneme(ph)) \
                    or re.sub(r'\d+', '', itv_ph.lower()) == re.sub(r'\d+', '', ph.lower())):
                print(f"| WARN: {tg_fn} phs are not same: ", itv_ph, ph, itv_marks, ph_list)
            mel2ph[start_frame:end_frame] = i_ph + 1
            i_ph += 1
            i_itv += 1
    mel2ph[-1] = mel2ph[-2]
    assert not np.any(mel2ph == 0)
    T_t = len(ph_list)
    dur = mel2token_to_dur(mel2ph, T_t)
    return mel2ph.tolist(), dur.tolist()


def split_audio_by_mel2ph(audio, mel2ph, hop_size, audio_num_mel_bins):
    if isinstance(audio, torch.Tensor):
        audio = audio.numpy()
    if isinstance(mel2ph, torch.Tensor):
        mel2ph = mel2ph.numpy()
    assert len(audio.shape) == 1, len(mel2ph.shape) == 1
    split_locs = []
    for i in range(1, len(mel2ph)):
        if mel2ph[i] != mel2ph[i - 1]:
            split_loc = i * hop_size
            split_locs.append(split_loc)

    new_audio = []
    for i in range(len(split_locs) - 1):
        new_audio.append(audio[split_locs[i]:split_locs[i + 1]])
        # cast to int: np.zeros rejects a float shape (the original passed 0.5 * audio_num_mel_bins directly)
        new_audio.append(np.zeros(int(0.5 * audio_num_mel_bins)))
    return np.concatenate(new_audio)


def mel2token_to_dur(mel2token, T_txt=None, max_dur=None):
    is_torch = isinstance(mel2token, torch.Tensor)
    has_batch_dim = True
    if not is_torch:
        mel2token = torch.LongTensor(mel2token)
    if T_txt is None:
        T_txt = mel2token.max()
    if len(mel2token.shape) == 1:
        mel2token = mel2token[None, ...]
        has_batch_dim = False
    B, _ = mel2token.shape
    dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token))
    dur = dur[:, 1:]
    if max_dur is not None:
        dur = dur.clamp(max=max_dur)
    if not is_torch:
        dur = dur.numpy()
    if not has_batch_dim:
        dur = dur[0]
    return dur
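For reference, here is a minimal, self-contained sketch of the `scatter_add` bin-counting that `mel2token_to_dur` performs: each mel frame votes for its (1-indexed) token, giving per-token durations. The toy `mel2token` values below are invented for illustration.

```python
import torch

mel2token = torch.LongTensor([[1, 1, 1, 2, 2, 3]])  # 6 mel frames -> 3 tokens
T_txt = 3
# bin-count: dur[0, t] accumulates how many frames map to token t
dur = mel2token.new_zeros(1, T_txt + 1).scatter_add(
    1, mel2token, torch.ones_like(mel2token))
print(dur[:, 1:])  # tensor([[3, 2, 1]]): token 1 spans 3 frames, token 2 spans 2, token 3 spans 1
```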
spaces/AIZ2H/04-Gradio-SOTA-Seq2Seq-AutoQA/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: 04 Gradio SOTA Seq2Seq AutoQA
emoji: 🧬❓
colorFrom: indigo
colorTo: yellow
sdk: gradio
sdk_version: 3.3.1
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/README.md
DELETED
@@ -1,50 +0,0 @@
# YOLOv7

> [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696)

<!-- [ALGORITHM] -->

## Abstract

YOLOv7 surpasses all known object detectors in both speed and accuracy in the range from 5 FPS to 160 FPS and has the highest accuracy 56.8% AP among all known real-time object detectors with 30 FPS or higher on GPU V100. YOLOv7-E6 object detector (56 FPS V100, 55.9% AP) outperforms both transformer-based detector SWIN-L Cascade-Mask R-CNN (9.2 FPS A100, 53.9% AP) by 509% in speed and 2% in accuracy, and convolutional-based detector ConvNeXt-XL Cascade-Mask R-CNN (8.6 FPS A100, 55.2% AP) by 551% in speed and 0.7% AP in accuracy, as well as YOLOv7 outperforms: YOLOR, YOLOX, Scaled-YOLOv4, YOLOv5, DETR, Deformable DETR, DINO-5scale-R50, ViT-Adapter-B and many other object detectors in speed and accuracy. Moreover, we train YOLOv7 only on MS COCO dataset from scratch without using any other datasets or pre-trained weights. Source code is released in [this https URL](https://github.com/WongKinYiu/yolov7).

<div align=center>
<img src="https://user-images.githubusercontent.com/17425982/204231759-cc5c77a9-38c6-4a41-85be-eb97e4b2bcbb.png"/>
</div>

<div align=center>
<img alt="YOLOv7-l" src="https://user-images.githubusercontent.com/68552295/216335336-963bd03a-71f3-4556-97af-18b20d69e065.png" width = 95.5%/>
YOLOv7-l-P5 model structure
</div>

## Results and models

### COCO

| Backbone | Arch | Size | SyncBN | AMP | Mem (GB) | Box AP | Config | Download |
| :------: | :--: | :--: | :----: | :-: | :------: | :----: | :----: | :------: |
| YOLOv7-tiny | P5 | 640 | Yes | Yes | 2.7 | 37.5 | [config](https://github.com/open-mmlab/mmyolo/tree/main/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719.log.json) |
| YOLOv7-l | P5 | 640 | Yes | Yes | 10.3 | 50.9 | [config](https://github.com/open-mmlab/mmyolo/tree/main/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601-8113c0eb.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601.log.json) |
| YOLOv7-x | P5 | 640 | Yes | Yes | 13.7 | 52.8 | [config](https://github.com/open-mmlab/mmyolo/tree/main/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331-ef949a68.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331.log.json) |
| YOLOv7-w | P6 | 1280 | Yes | Yes | 27.0 | 54.1 | [config](https://github.com/open-mmlab/mmyolo/tree/main/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco/yolov7_w-p6_syncbn_fast_8x16b-300e_coco_20221123_053031-a68ef9d2.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco/yolov7_w-p6_syncbn_fast_8x16b-300e_coco_20221123_053031.log.json) |
| YOLOv7-e | P6 | 1280 | Yes | Yes | 42.5 | 55.1 | [config](https://github.com/open-mmlab/mmyolo/tree/main/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco/yolov7_e-p6_syncbn_fast_8x16b-300e_coco_20221126_102636-34425033.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco/yolov7_e-p6_syncbn_fast_8x16b-300e_coco_20221126_102636.log.json) |

**Note**:
In the official YOLOv7 code, the `random_perspective` data augmentation in COCO object detection task training uses mask annotation information, which leads to higher performance. Object detection should not use mask annotation, so only box annotation information is used in `MMYOLO`. We will use the mask annotation information in the instance segmentation task.

1. The performance is unstable and may fluctuate by about 0.3 mAP. The performance shown above is the best model.
2. If users need the weight of `YOLOv7-e2e`, they can train according to the configs provided by us, or convert the official weight according to the [converter script](https://github.com/open-mmlab/mmyolo/blob/main/tools/model_converters/yolov7_to_mmyolo.py).
3. `fast` means that `YOLOv5DetDataPreprocessor` and `yolov5_collate` are used for data preprocessing, which is faster for training, but less flexible for multitasking. Recommended to use fast version config if you only care about object detection.
4. `SyncBN` means use SyncBN, `AMP` indicates training with mixed precision.
5. We use 8x A100 for training, and the single-GPU batch size is 16. This is different from the official code.

## Citation

```latex
@article{wang2022yolov7,
  title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
  author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
  journal={arXiv preprint arXiv:2207.02696},
  year={2022}
}
```
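For context on the table above, the sketch below shows one plausible way to run one of the listed configs and checkpoints through the MMDetection high-level inference API. The exact entry points can differ between MMYOLO versions, so treat the imports, the device string, and the `demo.jpg` path as assumptions rather than the project's documented usage.

```python
# A sketch (not from the README) of single-image inference with a listed config.
from mmdet.apis import init_detector, inference_detector

config = 'configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py'
checkpoint = 'yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth'

model = init_detector(config, checkpoint, device='cuda:0')  # build model and load weights
result = inference_detector(model, 'demo.jpg')              # run detection on one image
print(result)
```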
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py
DELETED
@@ -1,172 +0,0 @@
_base_ = [
    '../../../_base_/default_runtime.py',
    '../../../_base_/datasets/deepfashion2.py'
]

default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))

resume = False  # resume training from a checkpoint
load_from = None  # path of model weights to load
train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10)  # training epochs and validation interval
param_scheduler = [
    dict(  # warmup strategy
        type='LinearLR',
        begin=0,
        end=500,
        start_factor=0.001,
        by_epoch=False),
    dict(  # scheduler
        type='MultiStepLR',
        begin=0,
        end=150,
        milestones=[100, 130],
        gamma=0.1,
        by_epoch=True)
]
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))  # optimizer and learning rate
auto_scale_lr = dict(base_batch_size=512)  # scale the learning rate automatically according to batch_size

backend_args = dict(backend='local')  # data-loading backend; load from the local disk by default
dataset_type = 'DeepFashion2Dataset'  # dataset class name (DeepFashionDataset)
data_mode = 'topdown'  # algorithm type, determines how annotations are loaded
data_root = 'data/deepfashion2/'  # data root path
# Codec that generates training targets and decodes predictions; it also
# defines the input image size and the output heatmap size.
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)

train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(
        type='RandomBBoxTransform',
        shift_prob=0,
        rotate_factor=60,
        scale_factor=(0.75, 1.25)),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [  # test-time data pipeline
    dict(type='LoadImage', backend_args=backend_args),  # load the image
    dict(type='GetBBoxCenterScale'),  # compute center and scale from the bbox
    dict(type='TopdownAffine', input_size=codec['input_size']),  # update targets with the affine transform
    dict(type='PackPoseInputs')  # pack the targets
]
train_dataloader = dict(  # training data loader
    batch_size=8,  # batch size
    num_workers=6,  # number of data-loading workers
    persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
    sampler=dict(type='DefaultSampler', shuffle=True),  # sampling strategy: shuffle the data
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset path
        data_mode=data_mode,  # algorithm type
        ann_file='train/deepfashion2_short_sleeved_outwear.json',  # annotation file path
        data_prefix=dict(img='train/image/'),  # image path
        pipeline=train_pipeline  # data pipeline
    ))
val_dataloader = dict(
    batch_size=8,
    num_workers=6,
    persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),  # sampling strategy: no shuffling
    dataset=dict(
        type=dataset_type,  # dataset class name
        data_root=data_root,  # dataset path
        data_mode=data_mode,  # algorithm type
        ann_file='validation/deepfashion2_short_sleeved_outwear.json',
        data_prefix=dict(img='validation/image/'),  # image path
        test_mode=True,  # enable test mode
        pipeline=val_pipeline  # data pipeline
    ))
test_dataloader = val_dataloader  # by default the validation and test sets are not distinguished; redefine if needed

channel_cfg = dict(
    num_output_channels=294,
    dataset_joints=294,
    # Equivalent to the explicit 0..293 lists spelled out in the original file.
    dataset_channel=[list(range(294))],
    inference_channel=list(range(294)))

model = dict(
    type='TopdownPoseEstimator',  # the model type determines the algorithm pipeline
    data_preprocessor=dict(  # normalization and channel-order conversion, applied as part of the model
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        init_cfg=dict(
            type='Pretrained',  # pretrained weights; only the backbone is loaded, for transfer learning
            checkpoint='torchvision://resnet50')),
    head=dict(  # model head
        type='HeatmapHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        # deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),  # loss function
        decoder=codec),  # decoder that converts heatmaps back to coordinates
    test_cfg=dict(
        flip_test=True,  # enable horizontal flip-test ensembling
        flip_mode='heatmap',  # flip the heatmaps
        shift_heatmap=True,  # shift the flipped results to improve accuracy
    ))

val_evaluator = [
    dict(type='PCKAccuracy', thr=0.2),
    dict(type='AUC'),
    dict(type='EPE'),
]
test_evaluator = val_evaluator  # by default the validation and test sets are not distinguished; redefine if needed

visualizer = dict(
    vis_backends=[dict(type='LocalVisBackend'),
                  dict(type='WandbVisBackend')])
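The `codec` block above fixes the model's geometry: the 192x256 input is downsampled by a factor of 4 to a 48x64 heatmap, so every predicted heatmap peak must be scaled back up by 4 when decoded. A small numeric check of that scaling (not part of the config; the sample keypoint is invented):

```python
# Verify the stride implied by MSRAHeatmap's input_size / heatmap_size pair.
input_size = (192, 256)
heatmap_size = (48, 64)
stride = (input_size[0] / heatmap_size[0], input_size[1] / heatmap_size[1])
assert stride == (4.0, 4.0)

keypoint = (96, 128)                          # (x, y) in input coordinates
cell = (keypoint[0] / stride[0], keypoint[1] / stride[1])
print(cell)                                   # (24.0, 32.0) on the heatmap grid
```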
spaces/AdityaMahimkar/PlagiarismChecker/app.py
DELETED
@@ -1,48 +0,0 @@
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('stopwords')

import gradio as gr

# longest common subsequence
# dynamic programming algorithm for finding LCS
def lcs(l1, l2):
    s1 = word_tokenize(l1)
    s2 = word_tokenize(l2)
    # storing the dp values
    dp = [[None] * (len(s1) + 1) for i in range(len(s2) + 1)]

    for i in range(len(s2) + 1):
        for j in range(len(s1) + 1):
            if i == 0 or j == 0:
                dp[i][j] = 0
            elif s2[i - 1] == s1[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[len(s2)][len(s1)]

def plagiarismChecker(orig, plag):
    sent_o = sent_tokenize(orig)
    sent_p = sent_tokenize(plag)

    tokens_p = word_tokenize(plag)

    # maximum length of LCS for a sentence in the suspicious text
    max_lcs = 0
    sum_lcs = 0

    for i in sent_p:
        for j in sent_o:
            l = lcs(i, j)
            max_lcs = max(max_lcs, l)
        sum_lcs += max_lcs
        max_lcs = 0

    score = sum_lcs / len(tokens_p)
    return score * 100

plagiarismUI = gr.Interface(fn=plagiarismChecker, inputs=[gr.inputs.Textbox(lines=10, label='Text 1'), gr.inputs.Textbox(lines=10, label='Text 2')], outputs=gr.outputs.Textbox(label='Plagiarism Level'), title="Plagiarism Checker", theme='dark-peach')
plagiarismUI.launch(inbrowser=False)
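A quick usage sketch for the two functions above, assuming the same `nltk` downloads have already run. The sample sentences are invented; the two texts differ in one token out of ten, so the sentence-level LCS is 9 and the score works out to 90.0.

```python
orig = "The quick brown fox jumps over the lazy dog."
plag = "The quick brown fox leaps over the lazy dog."

print(lcs(orig, plag))                 # 9: LCS length in tokens (10 tokens incl. the period)
print(plagiarismChecker(orig, plag))   # 90.0: sum of per-sentence LCS / token count * 100
```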
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/warppipeline.js
DELETED
@@ -1,2 +0,0 @@
import WarpPostFxPipeline from './shaders/warp/WarpPostFxPipeline.js';
export default WarpPostFxPipeline;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/canvasinput/CanvasInput.d.ts
DELETED
@@ -1,2 +0,0 @@
import CanvasInput from '../../../plugins/canvasinput';
export default CanvasInput;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/MenuSetInteractive.js
DELETED
@@ -1,45 +0,0 @@
var MenuSetInteractive = function (menu) {
    menu
        // Expand sub event
        .on(menu.root.expandEventName, function (button, index) {
            if (this._isPassedEvent) {
                return;
            }
            var childrenKey = this.root.childrenKey;
            var subItems = this.items[index][childrenKey];
            if (subItems) {
                this.expandSubMenu(button, subItems);
            } else {
                // this.root.on('button.click', button); // TODO
            }
        }, menu)
        // Click any button
        .on('button.click', function (button, index, pointer, event) {
            // Pass event to root menu object
            if (this !== this.root) {
                this.root._isPassedEvent = true;
                this.root.emit('button.click', button, index, pointer, event);
                this.root._isPassedEvent = false;
            }
        }, menu)
        // Pointer over any button
        .on('button.over', function (button, index, pointer, event) {
            // Pass event to root menu object
            if (this !== this.root) {
                this.root._isPassedEvent = true;
                this.root.emit('button.over', button, index, pointer, event);
                this.root._isPassedEvent = false;
            }
        }, menu)
        // Pointer out any button
        .on('button.out', function (button, index, pointer, event) {
            // Pass event to root menu object
            if (this !== this.root) {
                this.root._isPassedEvent = true;
                this.root.emit('button.out', button, index, pointer, event);
                this.root._isPassedEvent = false;
            }
        }, menu);
};

export default MenuSetInteractive;
spaces/Alican/pixera/util/img2pixl.py
DELETED
@@ -1,115 +0,0 @@
import cv2
import random
import numpy as np
from PIL import Image
# Author: Alican Akca

class pixL:

    def __init__(self, numOfSquaresW=None, numOfSquaresH=None, size=[True, (512, 512)], square=6, ImgH=None, ImgW=None, image=None, background=None, pixValues=[]):
        self.size = size
        self.ImgH = ImgH
        self.ImgW = ImgW
        self.image = image
        self.square = square
        self.pixValues = pixValues
        self.background = background
        self.numOfSquaresW = numOfSquaresW
        self.numOfSquaresH = numOfSquaresH

    def toThePixL(self, image, pixel_size, segMode=False):
        self.square = pixel_size
        self.image = Image.fromarray(image).convert("RGB").resize((512, 512))
        self.ImgW, self.ImgH = self.image.size
        self.image = pixL.colorPicker(self)
        pixL.complier(self)
        if segMode == True:
            return pixL.postprocess(self), self.pixValues
        else:
            return pixL.postprocess(self)

    def postprocess(self):
        image = self.background
        size = (image.shape[0] - (image.shape[0] % 4), image.shape[1] - (image.shape[1] % 4))
        image = cv2.resize(image, size)
        return image

    def numOfSquaresFunc(self):
        self.numOfSquaresW = round((self.ImgW / self.square) + 1)
        self.numOfSquaresH = round((self.ImgH / self.square) + 1)

    def optimizer(RGB):  # called through the class, so no `self` parameter

        R_ = RGB[2]
        G_ = RGB[1]
        B_ = RGB[0]

        if R_ < 50 and G_ < 50 and B_ < 50:
            return (R_, G_, B_)

        elif 220 < R_ < 255 and 220 < G_ < 255 and 220 < B_ < 255:
            return (R_, G_, B_)
        else:
            sign = lambda x, y: random.choice([x, y])

            R_ = RGB[2] + sign(+1, -1) * random.randint(1, 10)
            G_ = RGB[1] + sign(+1, -1) * random.randint(1, 10)
            B_ = RGB[0] + sign(+1, -1) * random.randint(1, 10)

            R_ = 0 if R_ < 0 else (255 if R_ > 255 else R_)
            G_ = 0 if G_ < 0 else (255 if G_ > 255 else G_)
            B_ = 0 if B_ < 0 else (255 if B_ > 255 else B_)

            return (R_, G_, B_)

    def colorPicker(self):
        pixL.numOfSquaresFunc(self)

        for j in range(1, self.numOfSquaresH):

            for i in range(1, self.numOfSquaresW):

                self.pixValues.append((self.image.getpixel((
                    i * self.square - self.square // 2,
                    j * self.square - self.square // 2)),
                    (i * self.square - self.square // 2,
                     j * self.square - self.square // 2)))

        self.background = 255 * np.ones(shape=[self.ImgH - self.square,
                                               self.ImgW - self.square * 2, 3],
                                        dtype=np.uint8)

    def PEN(self, coorX, coorY, R, G, B):
        SQUARE = self.square
        cv2.rectangle(self.background,
                      pt1=(coorX - SQUARE, coorY - SQUARE),  # 0, 0 -> 0, 0
                      pt2=(coorX, coorY),  # 6, 6 -> 3, 3
                      color=(pixL.optimizer((R, G, B))),
                      thickness=-1)

        cv2.rectangle(self.background,
                      pt1=(coorX, coorY - SQUARE),  # 0, 0 -> 3, 0
                      pt2=(coorX + SQUARE, coorY),  # 6, 6 -> 6, 3
                      color=(pixL.optimizer((R, G, B))),
                      thickness=-1)

        cv2.rectangle(self.background,
                      pt1=(coorX - SQUARE, coorY),  # 0, 0 -> 0, 3
                      pt2=(coorX, coorY + SQUARE),  # 6, 6 -> 3, 6
                      color=(pixL.optimizer((R, G, B))),
                      thickness=-1)

        cv2.rectangle(self.background,
                      pt1=(coorX, coorY),  # 0, 0 -> 3, 3
                      pt2=(coorX + SQUARE, coorY + SQUARE),  # 6, 6 -> 6, 6
                      color=(pixL.optimizer((R, G, B))),
                      thickness=-1)

    def complier(self):
        for index, value in enumerate(self.pixValues):
            (R, G, B), (coorX, coorY) = value
            pixL.PEN(self, coorX, coorY, R, G, B)
        self.background = np.array(self.background).astype(np.uint8)
        self.background = cv2.resize(self.background, (self.ImgW, self.ImgH), interpolation=cv2.INTER_AREA)
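A usage sketch for the class above: `toThePixL` resizes the input to 512x512, samples one color per `pixel_size` cell via `colorPicker`, repaints the cells with `PEN` (each split into four jittered quadrants by `optimizer`), and returns the pixelated array. The image paths are hypothetical.

```python
import numpy as np
from PIL import Image

img = np.array(Image.open("input.jpg"))            # any RGB image file
pixelated = pixL().toThePixL(img, pixel_size=6)    # returns a uint8 numpy array
Image.fromarray(pixelated).save("pixelated.jpg")
```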
spaces/Amrrs/DragGan-Inversion/visualizer_drag_gradio_inversion.py
DELETED
@@ -1,1002 +0,0 @@
# https://huggingface.co/DragGan/DragGan-Models
# https://arxiv.org/abs/2305.10973
import os
import os.path as osp
from argparse import ArgumentParser
from functools import partial
from pathlib import Path
import time
import tempfile
import psutil

import gradio as gr
import numpy as np
import torch
from PIL import Image
import uuid

import dnnlib
from gradio_utils import (
    ImageMask,
    draw_mask_on_image,
    draw_points_on_image,
    get_latest_points_pair,
    get_valid_mask,
    on_change_single_global_state,
)
from viz.renderer import Renderer, add_watermark_np
from torch_utils.pti import run_PTI, export_updated_pickle

# download models from Hugging Face hub
from huggingface_hub import snapshot_download

model_dir = Path("./checkpoints")
snapshot_download("DragGan/DragGan-Models", repo_type="model", local_dir=model_dir)

# parser = ArgumentParser()
# parser.add_argument('--share', action='store_true')
# parser.add_argument('--cache-dir', type=str, default='./checkpoints')
# args = parser.parse_args()

cache_dir = model_dir

device = "cuda"
IS_SPACE = "DragGan/DragGan" in os.environ.get("SPACE_ID", "")
TIMEOUT = 80


def reverse_point_pairs(points):
    new_points = []
    for p in points:
        new_points.append([p[1], p[0]])
    return new_points


def clear_state(global_state, target=None):
    """Clear target history state from global_state
    If target is not defined, points and mask will be both removed.
    1. set global_state['points'] as empty dict
    2. set global_state['mask'] as full-one mask.
    """
    if target is None:
        target = ["point", "mask"]
    if not isinstance(target, list):
        target = [target]
    if "point" in target:
        global_state["points"] = dict()
        print("Clear Points State!")
    if "mask" in target:
        image_raw = global_state["images"]["image_raw"]
        global_state["mask"] = np.ones(
            (image_raw.size[1], image_raw.size[0]), dtype=np.uint8
        )
        print("Clear mask State!")

    return global_state


def init_images(global_state):
    """This function is called only once when the Gradio app is started.
    0. pre-process global_state, unpack values from global_state as needed
    1. Re-init renderer
    2. run `renderer._render_drag_impl` with `is_drag=False` to generate
       a new image
    3. Assign images to global state and re-generate mask
    """

    if isinstance(global_state, gr.State):
        state = global_state.value
    else:
        state = global_state

    state["renderer"].init_network(
        state["generator_params"],  # res
        state["pretrained_weight"],  # pkl
        state["params"]["seed"],  # w0_seed,
        state["w_pivot"],  # w_load
        state["params"]["latent_space"] == "w+",  # w_plus
        "const",
        state["params"]["trunc_psi"],  # trunc_psi,
        state["params"]["trunc_cutoff"],  # trunc_cutoff,
        None,  # input_transform
        state["params"]["lr"],  # lr,
    )

    state["renderer"]._render_drag_impl(
        state["generator_params"], is_drag=False, to_pil=True
    )

    init_image = state["generator_params"].image
    state["images"]["image_orig"] = init_image
    state["images"]["image_raw"] = init_image
    state["images"]["image_show"] = Image.fromarray(
        add_watermark_np(np.array(init_image))
    )
    state["mask"] = np.ones((init_image.size[1], init_image.size[0]), dtype=np.uint8)
    return global_state


def update_image_draw(image, points, mask, show_mask, global_state=None):
    image_draw = draw_points_on_image(image, points)
    if (
        show_mask
        and mask is not None
        and not (mask == 0).all()
        and not (mask == 1).all()
    ):
        image_draw = draw_mask_on_image(image_draw, mask)

    image_draw = Image.fromarray(add_watermark_np(np.array(image_draw)))
    if global_state is not None:
        global_state["images"]["image_show"] = image_draw
    return image_draw


def preprocess_mask_info(global_state, image):
    """Function to handle mask information.
    1. last_mask is None: Do not need to change mask, return mask
    2. last_mask is not None:
        2.1 global_state is remove_mask:
        2.2 global_state is add_mask:
    """
    if isinstance(image, dict):
        last_mask = get_valid_mask(image["mask"])
    else:
        last_mask = None
    mask = global_state["mask"]

    # mask in global state is a placeholder with all 1.
    if (mask == 1).all():
        mask = last_mask

    # last_mask = global_state['last_mask']
    editing_mode = global_state["editing_state"]

    if last_mask is None:
        return global_state

    if editing_mode == "remove_mask":
        updated_mask = np.clip(mask - last_mask, 0, 1)
        print(f"Last editing_state is {editing_mode}, do remove.")
    elif editing_mode == "add_mask":
        updated_mask = np.clip(mask + last_mask, 0, 1)
        print(f"Last editing_state is {editing_mode}, do add.")
    else:
        updated_mask = mask
        print(f"Last editing_state is {editing_mode}, " "do nothing to mask.")

    global_state["mask"] = updated_mask
    # global_state['last_mask'] = None  # clear buffer
    return global_state


def print_memory_usage():
    # Print system memory usage
    print(f"System memory usage: {psutil.virtual_memory().percent}%")

    # Print GPU memory usage
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print(f"GPU memory usage: {torch.cuda.memory_allocated() / 1e9} GB")
        print(f"Max GPU memory usage: {torch.cuda.max_memory_allocated() / 1e9} GB")
        device_properties = torch.cuda.get_device_properties(device)
        available_memory = (
            device_properties.total_memory - torch.cuda.max_memory_allocated()
        )
        print(f"Available GPU memory: {available_memory / 1e9} GB")
    else:
        print("No GPU available")


# filter large models running on SPAC

css = """
#output-image {
    width: 100% !important;
    aspect-ratio: 1 / 1 !important;
    height: auto !important;
}
#output-image canvas {
    width: 100% !important;
    aspect-ratio: 1 / 1 !important;
    height: auto !important;
}
"""
with gr.Blocks(css=css) as app:
    gr.Markdown(
        """
# DragGAN - Drag Your GAN - Face Inversion

## Interactive Point-based Manipulation on the Generative Image Manifold
### Unofficial Gradio Demo

**Due to high demand, only one model can be run at a time, or you can duplicate the space and run your own copy.**

<a href="https://huggingface.co/spaces/DragGan/DragGan-Inversion?duplicate=true" style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
<img style="margin-bottom: 0em;display: inline;margin-top: -.25em;" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> for no queue on your own hardware.</p>

* Official Repo: [XingangPan](https://github.com/XingangPan/DragGAN)
* Gradio Demo by: [LeoXing1996](https://github.com/LeoXing1996) © [OpenMMLab MMagic](https://github.com/open-mmlab/mmagic)
* Inversion Code: [ProgrammingHut](https://www.youtube.com/watch?v=viWiOC1Mikw), [EthanZhangCN](https://github.com/EthanZhangCN)
"""
    )

    # renderer = Renderer()
    global_state = gr.State(
        {
            "images": {
                # image_orig: the original image, changes when seed/model is changed
                # image_raw: image with mask and points, changes during optimization
                # image_show: image shown on screen
            },
            "temporal_params": {
                # stop
            },
            "w_pivot": None,
            "mask": None,  # mask for visualization, 1 for editing and 0 for unchange
            "last_mask": None,  # last edited mask
            "show_mask": True,  # add button
            "generator_params": dnnlib.EasyDict(),
            "params": {
                "seed": int(np.random.randint(0, 2**32 - 1)),
                "motion_lambda": 20,
                "r1_in_pixels": 3,
                "r2_in_pixels": 12,
                "magnitude_direction_in_pixels": 1.0,
                "latent_space": "w+",
                "trunc_psi": 0.7,
                "trunc_cutoff": None,
                "lr": 0.01,
            },
            "device": device,
            "draw_interval": 1,
            "renderer": Renderer(disable_timing=True),
            "points": {},
            "curr_point": None,
            "curr_type_point": "start",
            "editing_state": "add_points",
            "pretrained_weight": str(model_dir / "stylegan2-ffhq1024x1024.pkl"),
        }
    )

    # init image
    global_state = init_images(global_state)
    with gr.Row():
        with gr.Row():
            # Left --> tools
            with gr.Column():
                # Latent
                with gr.Row():
                    with gr.Column(scale=1, min_width=10):
                        gr.Markdown(value="Latent", show_label=False)

                    with gr.Column(scale=4, min_width=10):
                        form_seed_number = gr.Slider(
                            minimum=0,  # fixed typo: the original passed `mininium=0`
                            maximum=2**32 - 1,
                            step=1,
                            value=global_state.value["params"]["seed"],
                            interactive=True,
                            # randomize=True,
                            label="Seed",
                        )
                        form_lr_number = gr.Number(
                            value=global_state.value["params"]["lr"],
                            precision=5,
                            interactive=True,
                            label="Step Size",
                        )

                        with gr.Row():
                            with gr.Column(scale=2, min_width=10):
                                form_reset_image = gr.Button("Reset Image")
                            with gr.Column(scale=3, min_width=10):
                                form_latent_space = gr.Radio(
                                    ["w", "w+"],
                                    value=global_state.value["params"]["latent_space"],
                                    interactive=True,
                                    label="Latent space to optimize",
                                    show_label=False,
                                )
                        with gr.Row():
                            with gr.Column(scale=3, min_width=10):
                                form_custom_image = gr.Image(
                                    type="filepath", label="Custom Image", height=100
                                )
                            with gr.Column(scale=3, min_width=10):
                                form_reset_custom_image = gr.Button(
                                    "Remove Custom Image", interactive=False
                                )

                # Drag
                with gr.Row():
                    with gr.Column(scale=1, min_width=10):
                        gr.Markdown(value="Drag", show_label=False)
                    with gr.Column(scale=4, min_width=10):
                        with gr.Row():
                            with gr.Column(scale=1, min_width=10):
                                enable_add_points = gr.Button("Add Points")
                            with gr.Column(scale=1, min_width=10):
                                undo_points = gr.Button("Reset Points")
                        with gr.Row():
                            with gr.Column(scale=1, min_width=10):
                                form_start_btn = gr.Button("Start")
                            with gr.Column(scale=1, min_width=10):
                                form_stop_btn = gr.Button("Stop")

                        form_steps_number = gr.Number(
                            value=0, label="Steps", interactive=False
                        )

                # Mask
                with gr.Row():
                    with gr.Column(scale=1, min_width=10):
                        gr.Markdown(value="Mask", show_label=False)
                    with gr.Column(scale=4, min_width=10):
                        enable_add_mask = gr.Button("Edit Flexible Area")
                        with gr.Row():
                            with gr.Column(scale=1, min_width=10):
                                form_reset_mask_btn = gr.Button("Reset mask")
                            with gr.Column(scale=1, min_width=10):
                                show_mask = gr.Checkbox(
                                    label="Show Mask",
                                    value=global_state.value["show_mask"],
                                    show_label=False,
                                )

                        with gr.Row():
                            form_lambda_number = gr.Number(
                                value=global_state.value["params"]["motion_lambda"],
                                interactive=True,
                                label="Lambda",
                            )

                form_draw_interval_number = gr.Number(
                    value=global_state.value["draw_interval"],
                    label="Draw Interval (steps)",
                    interactive=True,
                    visible=False,
                )

            # Right --> Image
            with gr.Column(scale=2):
                form_image = ImageMask(
                    value=global_state.value["images"]["image_show"],
                    brush_radius=100,
                    elem_id="output-image",
                )
    gr.Markdown(
        """
## Quick Start

1. Select desired `Pretrained Model` and adjust `Seed` to generate an
   initial image.
2. Click on image to add control points.
3. Click `Start` and enjoy it!

## Advanced Usage

1. Change `Step Size` to adjust learning rate in drag optimization.
2. Select `w` or `w+` to change latent space to optimize:
   * Optimizing in `w` space may have a greater influence on the image.
   * Optimizing in `w+` space may work slower than `w`, but usually achieves
     better results.
   * Note that changing the latent space will reset the image, points and
     mask (this has the same effect as the `Reset Image` button).
3. Click `Edit Flexible Area` to create a mask and constrain the
   unmasked region to remain unchanged.
"""
    )
    gr.HTML(
        """
<style>
    .container {
        position: absolute;
        height: 50px;
        text-align: center;
        line-height: 50px;
        width: 100%;
    }
</style>
<div class="container">
Gradio demo supported by
<img src="https://avatars.githubusercontent.com/u/10245193?s=200&v=4" height="20" width="20" style="display:inline;">
<a href="https://github.com/open-mmlab/mmagic">OpenMMLab MMagic</a>
</div>
"""
    )
    # Network & latents tab listeners

    def on_click_reset_image(global_state):
        """Reset image to the original one and clear all states
        1. Re-init images
        2. Clear all states
        """

        init_images(global_state)
        clear_state(global_state)

        return global_state, global_state["images"]["image_show"]

    def on_click_reset_custom_image(global_state):
        """Reset image to the original one and clear all states
        1. Re-init images
        2. Clear all states
        """
        Path(global_state["pretrained_weight"]).unlink(missing_ok=True)
        global_state["w_pivot"] = None
        global_state["pretrained_weight"] = str(
            model_dir / "stylegan2-ffhq1024x1024.pkl"
        )

        init_images(global_state)
        clear_state(global_state)

        return global_state, global_state["images"]["image_show"]

    def on_image_change(
        custom_image, global_state, progress=gr.Progress(track_tqdm=True)
    ):
        new_img = Image.open(custom_image)
        new_img = new_img.convert("RGB")
        from PTI.configs import paths_config

        paths_config.stylegan2_ada_ffhq = global_state["pretrained_weight"]
        paths_config.dlib = (model_dir / "align.dat").as_posix()
        run_name = str(uuid.uuid4())
        new_G, w_pivot = run_PTI(new_img, run_name)

        out_path = Path(f"checkpoints/stylegan2-{run_name}.pkl")
        print(f"Exporting to {out_path}")
        export_updated_pickle(new_G, out_path, run_name)
        global_state["w_pivot"] = w_pivot
        global_state["pretrained_weight"] = str(out_path)
        init_images(global_state)
        clear_state(global_state)

        return (
            global_state,
            global_state["images"]["image_show"],
            gr.Image.update(interactive=True),
        )

    form_custom_image.upload(
        on_image_change,
        [form_custom_image, global_state],
        [global_state, form_image, form_reset_custom_image],
    )

    form_reset_custom_image.click(
        on_click_reset_custom_image, [global_state], [global_state, form_image]
    )

    form_reset_image.click(
        on_click_reset_image,
        inputs=[global_state],
        outputs=[global_state, form_image],
        queue=False,
        show_progress=True,
    )

    # Update parameters
    def on_change_update_image_seed(seed, global_state):
        """Function to handle generation seed change.
        1. Set seed to global_state
        2. Re-init images and clear all states
        """

        global_state["params"]["seed"] = int(seed)
        init_images(global_state)
        clear_state(global_state)

        return global_state, global_state["images"]["image_show"]

    form_seed_number.change(
        on_change_update_image_seed,
        inputs=[form_seed_number, global_state],
        outputs=[global_state, form_image],
    )

    def on_click_latent_space(latent_space, global_state):
        """Function to reset latent space to optimize.
        NOTE: this function resets the image and all controls
        1. Set latent-space to global_state
        2. Re-init images and clear all state
        """

        global_state["params"]["latent_space"] = latent_space
        init_images(global_state)
        clear_state(global_state)

        return global_state, global_state["images"]["image_show"]

    form_latent_space.change(
        on_click_latent_space,
        inputs=[form_latent_space, global_state],
        outputs=[global_state, form_image],
    )

    # ==== Params
    form_lambda_number.change(
        partial(on_change_single_global_state, ["params", "motion_lambda"]),
        inputs=[form_lambda_number, global_state],
        outputs=[global_state],
    )

    def on_change_lr(lr, global_state):
        if lr == 0:
            print("lr is 0, do nothing.")
            return global_state
        else:
            global_state["params"]["lr"] = lr
            renderer = global_state["renderer"]
            renderer.update_lr(lr)
            print("New optimizer: ")
            print(renderer.w_optim)
        return global_state

    form_lr_number.change(
        on_change_lr,
        inputs=[form_lr_number, global_state],
        outputs=[global_state],
        queue=False,
        show_progress=True,
    )

    def on_click_start(global_state, image):
        p_in_pixels = []
        t_in_pixels = []
        valid_points = []

        # handle start of drag in mask editing mode
        global_state = preprocess_mask_info(global_state, image)

        # Prepare the points for the inference
        if len(global_state["points"]) == 0:
            # yield on_click_start_wo_points(global_state, image)
            image_raw = global_state["images"]["image_raw"]
            update_image_draw(
                image_raw,
                global_state["points"],
                global_state["mask"],
                global_state["show_mask"],
                global_state,
            )

            yield (
                global_state,  # global_state
                0,  # form_steps_number,
                global_state["images"]["image_show"],  # form image
                gr.Button.update(interactive=True),  # form_reset_image
                gr.Button.update(interactive=True),  # add points button
                gr.Button.update(interactive=True),  # enable mask button
                gr.Button.update(interactive=True),  # undo points button
                gr.Button.update(interactive=True),  # reset mask button
                gr.Radio.update(interactive=True),  # latent space
                gr.Button.update(interactive=True),  # start button
                gr.Button.update(interactive=False),  # stop button
                gr.Number.update(interactive=True),  # form_seed_number
                gr.Number.update(interactive=True),  # form_lr_number
                gr.Checkbox.update(interactive=True),  # show_mask
                gr.Number.update(interactive=True),  # form_lambda_number
                gr.Button.update(interactive=True),  # form_reset_custom_image
            )
        else:
            # Transform the points into torch tensors
            for key_point, point in global_state["points"].items():
                try:
                    p_start = point.get("start_temp", point["start"])
                    p_end = point["target"]

                    if p_start is None or p_end is None:
                        continue

                except KeyError:
                    continue

                p_in_pixels.append(p_start)
                t_in_pixels.append(p_end)
                valid_points.append(key_point)

            mask = torch.tensor(global_state["mask"]).float()
            drag_mask = 1 - mask

            renderer: Renderer = global_state["renderer"]
            global_state["temporal_params"]["stop"] = False
            global_state["editing_state"] = "running"

            # reverse points order
            p_to_opt = reverse_point_pairs(p_in_pixels)
            t_to_opt = reverse_point_pairs(t_in_pixels)
            print("Running with:")
            print(f" Source: {p_in_pixels}")
            print(f" Target: {t_in_pixels}")
            step_idx = 0
            last_time = time.time()
            while True:
                print_memory_usage()
                # add a TIMEOUT break
                print(f"Running time: {time.time() - last_time}")
                if IS_SPACE and time.time() - last_time > TIMEOUT:
                    print("Timeout break!")
                    break
                if (
                    global_state["temporal_params"]["stop"]
                    or global_state["generator_params"]["stop"]
                ):
                    break

                # do drag here!
                renderer._render_drag_impl(
                    global_state["generator_params"],
                    p_to_opt,  # point
                    t_to_opt,  # target
                    drag_mask,  # mask,
                    global_state["params"]["motion_lambda"],  # lambda_mask
                    reg=0,
                    feature_idx=5,  # NOTE: do not support change for now
                    r1=global_state["params"]["r1_in_pixels"],  # r1
                    r2=global_state["params"]["r2_in_pixels"],  # r2
                    # random_seed = 0,
                    # noise_mode = 'const',
                    trunc_psi=global_state["params"]["trunc_psi"],
                    # force_fp32 = False,
                    # layer_name = None,
                    # sel_channels = 3,
                    # base_channel = 0,
                    # img_scale_db = 0,
                    # img_normalize = False,
                    # untransform = False,
                    is_drag=True,
                    to_pil=True,
                )

                if step_idx % global_state["draw_interval"] == 0:
                    print("Current Source:")
                    for key_point, p_i, t_i in zip(valid_points, p_to_opt, t_to_opt):
                        global_state["points"][key_point]["start_temp"] = [
                            p_i[1],
                            p_i[0],
                        ]
                        global_state["points"][key_point]["target"] = [
                            t_i[1],
                            t_i[0],
                        ]
                        start_temp = global_state["points"][key_point]["start_temp"]
                        print(f" {start_temp}")

                    image_result = global_state["generator_params"]["image"]
                    image_draw = update_image_draw(
                        image_result,
                        global_state["points"],
                        global_state["mask"],
                        global_state["show_mask"],
                        global_state,
                    )
                    global_state["images"]["image_raw"] = image_result

                yield (
                    global_state,  # global_state
                    step_idx,  # form_steps_number,
                    global_state["images"]["image_show"],  # form image
                    # gr.File.update(visible=False),
                    gr.Button.update(interactive=False),  # form_reset_image
                    gr.Button.update(interactive=False),  # add points button
                    gr.Button.update(interactive=False),  # enable mask button
                    gr.Button.update(interactive=False),  # undo points button
                    gr.Button.update(interactive=False),  # reset mask button
                    # latent space
                    gr.Radio.update(interactive=False),  # latent space
                    gr.Button.update(interactive=False),  # start button
                    # enable stop button in loop
                    gr.Button.update(interactive=True),  # stop button
                    # update other comps
                    gr.Number.update(interactive=False),  # form_seed_number
                    gr.Number.update(interactive=False),  # form_lr_number
                    gr.Checkbox.update(interactive=False),  # show_mask
                    gr.Number.update(interactive=False),  # form_lambda_number
                    gr.Button.update(interactive=False),  # form_reset_custom_image
                )

                # increment step
                step_idx += 1

            image_result = global_state["generator_params"]["image"]
            global_state["images"]["image_raw"] = image_result
            image_draw = update_image_draw(
                image_result,
                global_state["points"],
                global_state["mask"],
                global_state["show_mask"],
                global_state,
            )

            # fp = NamedTemporaryFile(suffix=".png", delete=False)
            # image_result.save(fp, "PNG")

            global_state["editing_state"] = "add_points"

            yield (
                global_state,  # global_state
                0,  # reset step to 0 after stop. # form_steps_number,
                global_state["images"]["image_show"],  # form image
                gr.Button.update(interactive=True),  # form_reset_image
                gr.Button.update(interactive=True),  # add points button
                gr.Button.update(interactive=True),  # enable mask button
                gr.Button.update(interactive=True),  # undo points button
                gr.Button.update(interactive=True),  # reset mask button
                gr.Radio.update(interactive=True),  # latent space
                gr.Button.update(interactive=True),  # start button
                gr.Button.update(interactive=False),  # stop button
                gr.Number.update(interactive=True),  # form_seed_number
                gr.Number.update(interactive=True),  # form_lr_number
                gr.Checkbox.update(interactive=True),  # show_mask
                gr.Number.update(interactive=True),  # form_lambda_number
                gr.Button.update(interactive=True),  # form_reset_custom_image
            )

    form_start_btn.click(
        on_click_start,
        inputs=[global_state, form_image],
        outputs=[
            global_state,
            form_steps_number,
            form_image,
            # form_download_result_file,
            # >>> buttons
            form_reset_image,
            enable_add_points,
            enable_add_mask,
            undo_points,
            form_reset_mask_btn,
            form_latent_space,
            form_start_btn,
            form_stop_btn,
            # <<< buttons
            # >>> input comps
            form_seed_number,
            form_lr_number,
form_lr_number,
|
761 |
-
show_mask,
|
762 |
-
form_lambda_number,
|
763 |
-
form_reset_custom_image,
|
764 |
-
],
|
765 |
-
)
|
766 |
-
|
767 |
-
def on_click_stop(global_state):
|
768 |
-
"""Function to handle stop button is clicked.
|
769 |
-
1. send a stop signal by set global_state["temporal_params"]["stop"] as True
|
770 |
-
2. Disable Stop button
|
771 |
-
"""
|
772 |
-
global_state["temporal_params"]["stop"] = True
|
773 |
-
|
774 |
-
return global_state, gr.Button.update(interactive=False)
|
775 |
-
|
776 |
-
form_stop_btn.click(
|
777 |
-
on_click_stop,
|
778 |
-
inputs=[global_state],
|
779 |
-
outputs=[global_state, form_stop_btn],
|
780 |
-
queue=False,
|
781 |
-
show_progress=True,
|
782 |
-
)
|
783 |
-
|
784 |
-
form_draw_interval_number.change(
|
785 |
-
partial(
|
786 |
-
on_change_single_global_state,
|
787 |
-
"draw_interval",
|
788 |
-
map_transform=lambda x: int(x),
|
789 |
-
),
|
790 |
-
inputs=[form_draw_interval_number, global_state],
|
791 |
-
outputs=[global_state],
|
792 |
-
queue=False,
|
793 |
-
show_progress=True,
|
794 |
-
)
|
795 |
-
|
796 |
-
def on_click_remove_point(global_state):
|
797 |
-
choice = global_state["curr_point"]
|
798 |
-
del global_state["points"][choice]
|
799 |
-
|
800 |
-
choices = list(global_state["points"].keys())
|
801 |
-
|
802 |
-
if len(choices) > 0:
|
803 |
-
global_state["curr_point"] = choices[0]
|
804 |
-
|
805 |
-
return (
|
806 |
-
gr.Dropdown.update(choices=choices, value=choices[0]),
|
807 |
-
global_state,
|
808 |
-
)
|
809 |
-
|
810 |
-
# Mask
|
811 |
-
def on_click_reset_mask(global_state):
|
812 |
-
global_state["mask"] = np.ones(
|
813 |
-
(
|
814 |
-
global_state["images"]["image_raw"].size[1],
|
815 |
-
global_state["images"]["image_raw"].size[0],
|
816 |
-
),
|
817 |
-
dtype=np.uint8,
|
818 |
-
)
|
819 |
-
image_draw = update_image_draw(
|
820 |
-
global_state["images"]["image_raw"],
|
821 |
-
global_state["points"],
|
822 |
-
global_state["mask"],
|
823 |
-
global_state["show_mask"],
|
824 |
-
global_state,
|
825 |
-
)
|
826 |
-
return global_state, gr.Image.update(value=image_draw, interactive=False)
|
827 |
-
|
828 |
-
form_reset_mask_btn.click(
|
829 |
-
on_click_reset_mask,
|
830 |
-
inputs=[global_state],
|
831 |
-
outputs=[global_state, form_image],
|
832 |
-
)
|
833 |
-
|
834 |
-
# Image
|
835 |
-
def on_click_enable_draw(global_state, image):
|
836 |
-
"""Function to start add mask mode.
|
837 |
-
1. Preprocess mask info from last state
|
838 |
-
2. Change editing state to add_mask
|
839 |
-
3. Set curr image with points and mask
|
840 |
-
"""
|
841 |
-
global_state = preprocess_mask_info(global_state, image)
|
842 |
-
global_state["editing_state"] = "add_mask"
|
843 |
-
image_raw = global_state["images"]["image_raw"]
|
844 |
-
image_draw = update_image_draw(
|
845 |
-
image_raw, global_state["points"], global_state["mask"], True, global_state
|
846 |
-
)
|
847 |
-
return (
|
848 |
-
global_state,
|
849 |
-
gr.Image.update(value=image_draw, interactive=True),
|
850 |
-
)
|
851 |
-
|
852 |
-
def on_click_remove_draw(global_state, image):
|
853 |
-
"""Function to start remove mask mode.
|
854 |
-
1. Preprocess mask info from last state
|
855 |
-
2. Change editing state to remove_mask
|
856 |
-
3. Set curr image with points and mask
|
857 |
-
"""
|
858 |
-
global_state = preprocess_mask_info(global_state, image)
|
859 |
-
global_state["edinting_state"] = "remove_mask"
|
860 |
-
image_raw = global_state["images"]["image_raw"]
|
861 |
-
image_draw = update_image_draw(
|
862 |
-
image_raw, global_state["points"], global_state["mask"], True, global_state
|
863 |
-
)
|
864 |
-
return (
|
865 |
-
global_state,
|
866 |
-
gr.Image.update(value=image_draw, interactive=True),
|
867 |
-
)
|
868 |
-
|
869 |
-
enable_add_mask.click(
|
870 |
-
on_click_enable_draw,
|
871 |
-
inputs=[global_state, form_image],
|
872 |
-
outputs=[
|
873 |
-
global_state,
|
874 |
-
form_image,
|
875 |
-
],
|
876 |
-
queue=False,
|
877 |
-
show_progress=True,
|
878 |
-
)
|
879 |
-
|
880 |
-
def on_click_add_point(global_state, image: dict):
|
881 |
-
"""Function switch from add mask mode to add points mode.
|
882 |
-
1. Updaste mask buffer if need
|
883 |
-
2. Change global_state['editing_state'] to 'add_points'
|
884 |
-
3. Set current image with mask
|
885 |
-
"""
|
886 |
-
|
887 |
-
global_state = preprocess_mask_info(global_state, image)
|
888 |
-
global_state["editing_state"] = "add_points"
|
889 |
-
mask = global_state["mask"]
|
890 |
-
image_raw = global_state["images"]["image_raw"]
|
891 |
-
image_draw = update_image_draw(
|
892 |
-
image_raw,
|
893 |
-
global_state["points"],
|
894 |
-
mask,
|
895 |
-
global_state["show_mask"],
|
896 |
-
global_state,
|
897 |
-
)
|
898 |
-
|
899 |
-
return (
|
900 |
-
global_state,
|
901 |
-
gr.Image.update(value=image_draw, interactive=False),
|
902 |
-
)
|
903 |
-
|
904 |
-
enable_add_points.click(
|
905 |
-
on_click_add_point,
|
906 |
-
inputs=[global_state, form_image],
|
907 |
-
outputs=[global_state, form_image],
|
908 |
-
queue=False,
|
909 |
-
show_progress=True,
|
910 |
-
)
|
911 |
-
|
912 |
-
def on_click_image(global_state, evt: gr.SelectData):
|
913 |
-
"""This function only support click for point selection"""
|
914 |
-
xy = evt.index
|
915 |
-
if global_state["editing_state"] != "add_points":
|
916 |
-
print(f'In {global_state["editing_state"]} state. ' "Do not add points.")
|
917 |
-
|
918 |
-
return global_state, global_state["images"]["image_show"]
|
919 |
-
|
920 |
-
points = global_state["points"]
|
921 |
-
|
922 |
-
point_idx = get_latest_points_pair(points)
|
923 |
-
if point_idx is None:
|
924 |
-
points[0] = {"start": xy, "target": None}
|
925 |
-
print(f"Click Image - Start - {xy}")
|
926 |
-
elif points[point_idx].get("target", None) is None:
|
927 |
-
points[point_idx]["target"] = xy
|
928 |
-
print(f"Click Image - Target - {xy}")
|
929 |
-
else:
|
930 |
-
points[point_idx + 1] = {"start": xy, "target": None}
|
931 |
-
print(f"Click Image - Start - {xy}")
|
932 |
-
|
933 |
-
image_raw = global_state["images"]["image_raw"]
|
934 |
-
image_draw = update_image_draw(
|
935 |
-
image_raw,
|
936 |
-
global_state["points"],
|
937 |
-
global_state["mask"],
|
938 |
-
global_state["show_mask"],
|
939 |
-
global_state,
|
940 |
-
)
|
941 |
-
|
942 |
-
return global_state, image_draw
|
943 |
-
|
944 |
-
form_image.select(
|
945 |
-
on_click_image,
|
946 |
-
inputs=[global_state],
|
947 |
-
outputs=[global_state, form_image],
|
948 |
-
queue=False,
|
949 |
-
show_progress=True,
|
950 |
-
)
|
951 |
-
|
952 |
-
def on_click_clear_points(global_state):
|
953 |
-
"""Function to handle clear all control points
|
954 |
-
1. clear global_state['points'] (clear_state)
|
955 |
-
2. re-init network
|
956 |
-
2. re-draw image
|
957 |
-
"""
|
958 |
-
clear_state(global_state, target="point")
|
959 |
-
|
960 |
-
renderer: Renderer = global_state["renderer"]
|
961 |
-
renderer.feat_refs = None
|
962 |
-
|
963 |
-
image_raw = global_state["images"]["image_raw"]
|
964 |
-
image_draw = update_image_draw(
|
965 |
-
image_raw, {}, global_state["mask"], global_state["show_mask"], global_state
|
966 |
-
)
|
967 |
-
return global_state, image_draw
|
968 |
-
|
969 |
-
undo_points.click(
|
970 |
-
on_click_clear_points,
|
971 |
-
inputs=[global_state],
|
972 |
-
outputs=[global_state, form_image],
|
973 |
-
queue=False,
|
974 |
-
show_progress=True,
|
975 |
-
)
|
976 |
-
|
977 |
-
def on_click_show_mask(global_state, show_mask):
|
978 |
-
"""Function to control whether show mask on image."""
|
979 |
-
global_state["show_mask"] = show_mask
|
980 |
-
|
981 |
-
image_raw = global_state["images"]["image_raw"]
|
982 |
-
image_draw = update_image_draw(
|
983 |
-
image_raw,
|
984 |
-
global_state["points"],
|
985 |
-
global_state["mask"],
|
986 |
-
global_state["show_mask"],
|
987 |
-
global_state,
|
988 |
-
)
|
989 |
-
return global_state, image_draw
|
990 |
-
|
991 |
-
show_mask.change(
|
992 |
-
on_click_show_mask,
|
993 |
-
inputs=[global_state, show_mask],
|
994 |
-
outputs=[global_state, form_image],
|
995 |
-
queue=False,
|
996 |
-
show_progress=True,
|
997 |
-
)
|
998 |
-
|
999 |
-
# print("SHAReD: Start app", parser.parse_args())
|
1000 |
-
gr.close_all()
|
1001 |
-
app.queue(concurrency_count=1, max_size=200, api_open=False)
|
1002 |
-
app.launch(show_api=False)
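
Note: the drag handler above is a Gradio generator; every yield inside the while loop streams an intermediate image plus a full set of component states to the browser, and the stop button only flips a flag that the loop polls. A minimal sketch of that pattern in isolation, assuming Gradio 3.x with queuing enabled (the component names and the dummy work loop are hypothetical, not taken from the app above):

import time

import gradio as gr

with gr.Blocks() as demo:
    steps = gr.Number(value=0, label="step")
    start = gr.Button("start")
    stop = gr.Button("stop", interactive=False)
    state = gr.State({"stop": False})

    def run(state):
        state["stop"] = False
        for i in range(100):
            if state["stop"]:
                break
            time.sleep(0.1)  # stand-in for one optimization step
            # each yield streams one UI update: step counter plus button states
            yield state, i, gr.Button.update(interactive=False), gr.Button.update(interactive=True)
        # final yield restores the idle UI
        yield state, 0, gr.Button.update(interactive=True), gr.Button.update(interactive=False)

    def halt(state):
        state["stop"] = True  # the running generator sees this on its next poll
        return state, gr.Button.update(interactive=False)

    start.click(run, inputs=[state], outputs=[state, steps, start, stop])
    # queue=False lets the stop click bypass the busy queue, as in the app above
    stop.click(halt, inputs=[state], outputs=[state, stop], queue=False)

demo.queue(concurrency_count=1).launch()
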
spaces/AndreLie95/Diabetes_Risk_Prediction/app.py
DELETED
@@ -1,76 +0,0 @@
import numpy as np
import pandas as pd
import joblib
import streamlit as st

with open('pipeline_clf.pkl', 'rb') as file_1:
    clf = joblib.load(file_1)

st.title('Prediksi Resiko Penyakit Diabetes')
umur = st.number_input('Umur Saat Ini :', 18, 100)
if (umur >= 18) & (umur <= 24):
    Age = 1
elif (umur >= 25) & (umur <= 34):
    Age = 2
elif (umur >= 35) & (umur <= 44):
    Age = 3
elif (umur >= 45) & (umur <= 54):
    Age = 4
elif (umur >= 55) & (umur <= 64):
    Age = 5
elif (umur >= 65) & (umur <= 74):
    Age = 6
elif umur >= 75:
    Age = 7

tinggi_badan = st.number_input('Masukkan Tinggi Badan(cm) Anda :', 1, 250)
berat_badan = st.number_input('Masukkan Berat Badan(Kg) Anda :', 1, 200)
BMI = berat_badan / ((tinggi_badan / 100) ** 2)

Kolestrol = st.radio('Apakah Anda Memiliki Riwayat Kolestrol Tinggi ?', ('Tidak', 'Ya'))
if Kolestrol == 'Tidak':
    HighChol = 0
else:
    HighChol = 1

Penyakit_Jantung = st.radio('Apakah Anda Memiliki Riwayat Penyakit Jantung ?', ('Tidak', 'Ya'))
if Penyakit_Jantung == 'Tidak':
    HeartDiseaseorAttack = 0
else:
    HeartDiseaseorAttack = 1

Tensi = st.radio('Apakah Anda Memiliki Riwayat Hipertensi ?', ('Tidak', 'Ya'))
if Tensi == 'Tidak':
    HighBP = 0
else:
    HighBP = 1

Jalan = st.radio('Apakah Anda Memiliki Kesulitan Berjalan ?', ('Tidak', 'Ya'))
if Jalan == 'Tidak':
    DiffWalk = 0
else:
    DiffWalk = 1

kesehatan = st.selectbox('Bagaimana Kondisi Kesehatan Anda Secara Umum ?', ('Luar Biasa', 'Sangat Baik', 'Baik', 'Kurang Baik', 'Jelek'))
if kesehatan == 'Luar Biasa':
    GenHlth = 1
elif kesehatan == 'Sangat Baik':
    GenHlth = 2
elif kesehatan == 'Baik':
    GenHlth = 3
elif kesehatan == 'Kurang Baik':
    GenHlth = 4
elif kesehatan == 'Jelek':
    GenHlth = 5

if st.button('Prediksi'):
    data_inf = pd.DataFrame({'Age': Age,
                             'BMI': BMI,
                             'HighChol': HighChol,
                             'HeartDiseaseorAttack': HeartDiseaseorAttack,
                             'HighBP': HighBP,
                             'DiffWalk': DiffWalk,
                             'GenHlth': GenHlth
                             }, index=[0])
    hasil = 'Anda Tidak Memiliki Resiko Diabetes' if clf.predict(data_inf) == 0 else 'Anda Beresiko Diabetes'
    st.header(hasil)
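
For reference, a minimal sketch of the inference path this form drives, assuming pipeline_clf.pkl holds a fitted scikit-learn pipeline (as the predict call above implies); the feature values are hypothetical:

import joblib
import pandas as pd

with open('pipeline_clf.pkl', 'rb') as f:
    clf = joblib.load(f)

# one row with the same seven columns the app assembles above
row = pd.DataFrame([{
    'Age': 4, 'BMI': 27.8, 'HighChol': 1, 'HeartDiseaseorAttack': 0,
    'HighBP': 1, 'DiffWalk': 0, 'GenHlth': 3,
}])
print('at risk' if clf.predict(row)[0] == 1 else 'not at risk')
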
spaces/Andy1621/uniformer_image_demo/uniformer.py
DELETED
@@ -1,366 +0,0 @@
from collections import OrderedDict
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath, to_2tuple

layer_scale = False
init_value = 1e-6


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class CMlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = nn.BatchNorm2d(dim)
        self.conv1 = nn.Conv2d(dim, dim, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)
        self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = nn.BatchNorm2d(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class SABlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        global layer_scale
        self.ls = layer_scale
        if self.ls:
            global init_value
            print(f"Use layer_scale: {layer_scale}, init_values: {init_value}")
            self.gamma_1 = nn.Parameter(init_value * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_value * torch.ones((dim)), requires_grad=True)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, N, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        if self.ls:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        x = x.transpose(1, 2).reshape(B, N, H, W)
        return x


class head_embedding(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(head_embedding, self).__init__()

        self.proj = nn.Sequential(
            nn.Conv2d(in_channels, out_channels // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.BatchNorm2d(out_channels // 2),
            nn.GELU(),
            nn.Conv2d(out_channels // 2, out_channels, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        x = self.proj(x)
        return x


class middle_embedding(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(middle_embedding, self).__init__()

        self.proj = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        x = self.proj(x)
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.norm = nn.LayerNorm(embed_dim)
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        B, C, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        return x


class UniFormer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`  -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, depth=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512],
                 head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, conv_stem=False):
        """
        Args:
            depth (list): depth of each stage
            img_size (int, tuple): input image size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (list): embedding dimension of each stage
            head_dim (int): head dimension
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer (nn.Module): normalization layer
            conv_stem (bool): whether to use an overlapped patch stem
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        if conv_stem:
            self.patch_embed1 = head_embedding(in_channels=in_chans, out_channels=embed_dim[0])
            self.patch_embed2 = middle_embedding(in_channels=embed_dim[0], out_channels=embed_dim[1])
            self.patch_embed3 = middle_embedding(in_channels=embed_dim[1], out_channels=embed_dim[2])
            self.patch_embed4 = middle_embedding(in_channels=embed_dim[2], out_channels=embed_dim[3])
        else:
            self.patch_embed1 = PatchEmbed(
                img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
            self.patch_embed2 = PatchEmbed(
                img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
            self.patch_embed3 = PatchEmbed(
                img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
            self.patch_embed4 = PatchEmbed(
                img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])

        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]  # stochastic depth decay rule
        num_heads = [dim // head_dim for dim in embed_dim]
        self.blocks1 = nn.ModuleList([
            CBlock(
                dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth[0])])
        self.blocks2 = nn.ModuleList([
            CBlock(
                dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i + depth[0]], norm_layer=norm_layer)
            for i in range(depth[1])])
        self.blocks3 = nn.ModuleList([
            SABlock(
                dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i + depth[0] + depth[1]], norm_layer=norm_layer)
            for i in range(depth[2])])
        self.blocks4 = nn.ModuleList([
            SABlock(
                dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i + depth[0] + depth[1] + depth[2]], norm_layer=norm_layer)
            for i in range(depth[3])])
        self.norm = nn.BatchNorm2d(embed_dim[-1])

        # Representation layer
        if representation_size:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head
        self.head = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed1(x)
        x = self.pos_drop(x)
        for blk in self.blocks1:
            x = blk(x)
        x = self.patch_embed2(x)
        for blk in self.blocks2:
            x = blk(x)
        x = self.patch_embed3(x)
        for blk in self.blocks3:
            x = blk(x)
        x = self.patch_embed4(x)
        for blk in self.blocks4:
            x = blk(x)
        x = self.norm(x)
        x = self.pre_logits(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = x.flatten(2).mean(-1)
        x = self.head(x)
        return x


@register_model
def uniformer_small(pretrained=True, **kwargs):
    model = UniFormer(
        depth=[3, 4, 8, 3],
        embed_dim=[64, 128, 320, 512], head_dim=64, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def uniformer_small_plus(pretrained=True, **kwargs):
    model = UniFormer(
        depth=[3, 5, 9, 3], conv_stem=True,
        embed_dim=[64, 128, 320, 512], head_dim=64, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def uniformer_base(pretrained=True, **kwargs):
    model = UniFormer(
        depth=[5, 8, 20, 7],
        embed_dim=[64, 128, 320, 512], head_dim=64, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def uniformer_base_ls(pretrained=True, **kwargs):
    global layer_scale
    layer_scale = True
    model = UniFormer(
        depth=[5, 8, 20, 7],
        embed_dim=[64, 128, 320, 512], head_dim=64, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model
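
A quick smoke test of the small variant defined above; note that the pretrained flag is accepted but unused in this file, so the model comes up with random weights:

import torch

model = uniformer_small()
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))  # dummy batch at the default 224x224
print(logits.shape)  # torch.Size([2, 1000])
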
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
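
These four lines rely on MMDetection's _base_ inheritance: the file loads the 1x schedule config and overrides only the LR steps and epoch count. A sketch of inspecting the merged result, assuming mmcv is installed and the path points into an MMDetection checkout:

from mmcv import Config

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py')
print(cfg.lr_config.step)     # [16, 23]
print(cfg.runner.max_epochs)  # 24
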
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py
DELETED
@@ -1,10 +0,0 @@
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=60),
    auxiliary_head=dict(num_classes=60),
    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
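
The slide test mode above evaluates 480x480 crops every 320 pixels. A quick sketch of how many windows that produces for a given image size (standard sliding-window arithmetic, not MMSegmentation code; the image size is hypothetical):

import math

def num_windows(size, crop, stride):
    # windows needed to cover one dimension; the last window is clamped to the edge
    return max(math.ceil((size - crop) / stride) + 1, 1)

h, w = 520, 520  # a typical pascal-context image size
print(num_windows(h, 480, 320) * num_windows(w, 480, 320))  # 4
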
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/midas_net.py
DELETED
@@ -1,76 +0,0 @@
"""MidasNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn

from .base_model import BaseModel
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder


class MidasNet(BaseModel):
    """Network for monocular depth estimation.
    """

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            non_negative (bool, optional): Clamp the output depth to be non-negative. Defaults to True.
        """
        print("Loading weights: ", path)

        super(MidasNet, self).__init__()

        use_pretrained = False if path is None else True

        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)

        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """

        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
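
A hypothetical smoke test for the network above, assuming _make_encoder can construct the resnext101_wsl backbone locally (it is normally fetched via torch.hub); with path=None the weights are random, and depth comes back at input resolution as (N, H, W):

import torch

net = MidasNet(path=None, features=256)
net.eval()
with torch.no_grad():
    depth = net(torch.randn(1, 3, 384, 384))  # MiDaS's usual 384x384 input
print(depth.shape)  # torch.Size([1, 384, 384])
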
spaces/Apex-X/nono/roop/utilities.py
DELETED
@@ -1,149 +0,0 @@
import glob
import mimetypes
import os
import platform
import shutil
import ssl
import subprocess
import urllib
from pathlib import Path
from typing import List, Optional
from tqdm import tqdm

import roop.globals

TEMP_DIRECTORY = 'temp'
TEMP_VIDEO_FILE = 'temp.mp4'

# monkey patch ssl for mac
if platform.system().lower() == 'darwin':
    ssl._create_default_https_context = ssl._create_unverified_context


def run_ffmpeg(args: List[str]) -> bool:
    commands = ['ffmpeg', '-hide_banner', '-loglevel', roop.globals.log_level]
    commands.extend(args)
    try:
        subprocess.check_output(commands, stderr=subprocess.STDOUT)
        return True
    except Exception:
        pass
    return False


def detect_fps(target_path: str) -> float:
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path]
    output = subprocess.check_output(command).decode().strip().split('/')
    try:
        numerator, denominator = map(int, output)
        return numerator / denominator
    except Exception:
        pass
    return 30


def extract_frames(target_path: str, fps: float = 30) -> bool:
    temp_directory_path = get_temp_directory_path(target_path)
    temp_frame_quality = roop.globals.temp_frame_quality * 31 // 100
    return run_ffmpeg(['-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24', '-vf', 'fps=' + str(fps), os.path.join(temp_directory_path, '%04d.' + roop.globals.temp_frame_format)])


def create_video(target_path: str, fps: float = 30) -> bool:
    temp_output_path = get_temp_output_path(target_path)
    temp_directory_path = get_temp_directory_path(target_path)
    output_video_quality = (roop.globals.output_video_quality + 1) * 51 // 100
    commands = ['-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' + roop.globals.temp_frame_format), '-c:v', roop.globals.output_video_encoder]
    if roop.globals.output_video_encoder in ['libx264', 'libx265', 'libvpx']:
        commands.extend(['-crf', str(output_video_quality)])
    if roop.globals.output_video_encoder in ['h264_nvenc', 'hevc_nvenc']:
        commands.extend(['-cq', str(output_video_quality)])
    commands.extend(['-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', temp_output_path])
    return run_ffmpeg(commands)


def restore_audio(target_path: str, output_path: str) -> None:
    temp_output_path = get_temp_output_path(target_path)
    done = run_ffmpeg(['-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path, '-c:v', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-y', output_path])
    if not done:
        move_temp(target_path, output_path)


def get_temp_frame_paths(target_path: str) -> List[str]:
    temp_directory_path = get_temp_directory_path(target_path)
    return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' + roop.globals.temp_frame_format)))


def get_temp_directory_path(target_path: str) -> str:
    target_name, _ = os.path.splitext(os.path.basename(target_path))
    target_directory_path = os.path.dirname(target_path)
    return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name)


def get_temp_output_path(target_path: str) -> str:
    temp_directory_path = get_temp_directory_path(target_path)
    return os.path.join(temp_directory_path, TEMP_VIDEO_FILE)


def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Optional[str]:
    if source_path and target_path and output_path:
        source_name, _ = os.path.splitext(os.path.basename(source_path))
        target_name, target_extension = os.path.splitext(os.path.basename(target_path))
        if os.path.isdir(output_path):
            return os.path.join(output_path, source_name + '-' + target_name + target_extension)
    return output_path


def create_temp(target_path: str) -> None:
    temp_directory_path = get_temp_directory_path(target_path)
    Path(temp_directory_path).mkdir(parents=True, exist_ok=True)


def move_temp(target_path: str, output_path: str) -> None:
    temp_output_path = get_temp_output_path(target_path)
    if os.path.isfile(temp_output_path):
        if os.path.isfile(output_path):
            os.remove(output_path)
        shutil.move(temp_output_path, output_path)


def clean_temp(target_path: str) -> None:
    temp_directory_path = get_temp_directory_path(target_path)
    parent_directory_path = os.path.dirname(temp_directory_path)
    if not roop.globals.keep_frames and os.path.isdir(temp_directory_path):
        shutil.rmtree(temp_directory_path)
    if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
        os.rmdir(parent_directory_path)


def has_image_extension(image_path: str) -> bool:
    return image_path.lower().endswith(('png', 'jpg', 'jpeg', 'webp'))


def is_image(image_path: str) -> bool:
    if image_path and os.path.isfile(image_path):
        mimetype, _ = mimetypes.guess_type(image_path)
        return bool(mimetype and mimetype.startswith('image/'))
    return False


def is_video(video_path: str) -> bool:
    if video_path and os.path.isfile(video_path):
        mimetype, _ = mimetypes.guess_type(video_path)
        return bool(mimetype and mimetype.startswith('video/'))
    return False


def conditional_download(download_directory_path: str, urls: List[str]) -> None:
    if not os.path.exists(download_directory_path):
        os.makedirs(download_directory_path)
    for url in urls:
        download_file_path = os.path.join(download_directory_path, os.path.basename(url))
        if not os.path.exists(download_file_path):
            request = urllib.request.urlopen(url)  # type: ignore[attr-defined]
            total = int(request.headers.get('Content-Length', 0))
            with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress:
                urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size))  # type: ignore[attr-defined]


def resolve_relative_path(path: str) -> str:
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ariharasudhan/YoloV5/utils/activations.py
DELETED
@@ -1,103 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Activation functions
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class SiLU(nn.Module):
    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):
    # Hard-SiLU activation
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX


class Mish(nn.Module):
    # Mish activation https://github.com/digantamisra98/Mish
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    # Mish activation memory-efficient
    class F(torch.autograd.Function):

        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


class FReLU(nn.Module):
    # FReLU activation https://arxiv.org/abs/2007.11824
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))


class AconC(nn.Module):
    r""" ACON activation (activate or not)
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x


class MetaAconC(nn.Module):
    r""" ACON activation (activate or not)
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
        super().__init__()
        c2 = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
        # self.bn1 = nn.BatchNorm2d(c2)
        # self.bn2 = nn.BatchNorm2d(c1)

    def forward(self, x):
        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
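
A small sketch comparing two of the stateless activations above and dropping a parameterized one into a conv block (the block itself is hypothetical):

import torch
import torch.nn as nn

x = torch.linspace(-4, 4, 9)
print(SiLU.forward(x))       # smooth gate: x * sigmoid(x)
print(Hardswish.forward(x))  # piecewise-linear approximation of SiLU

# FReLU carries per-channel parameters, so it is constructed with the channel count
block = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), FReLU(16))
print(block(torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 16, 8, 8])
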
spaces/Asahi402/White-box-Cartoonization/app.py
DELETED
@@ -1,108 +0,0 @@
#!/usr/bin/env python

from __future__ import annotations
import argparse
import functools
import os
import pathlib
import sys
from typing import Callable
import uuid

import gradio as gr
import huggingface_hub
import numpy as np
import PIL.Image

from io import BytesIO
from wbc.cartoonize import Cartoonize

ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization'
TITLE = 'SystemErrorWang/White-box-Cartoonization'
DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}.

"""
ARTICLE = """

"""

SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"]


def compress_UUID():
    '''
    Following http://www.ietf.org/rfc/rfc1738.txt, generate a short string
    from a UUID by widening the character set.
    Alphabet: [0-9a-zA-Z\-_], 64 symbols in total
    Length: (32 - 2) / 3 * 2 = 20
    Note: collision-free in practice for everyone on Earth for 100 years (2^120)
    :return: String
    '''
    row = str(uuid.uuid4()).replace('-', '')
    safe_code = ''
    for i in range(10):
        enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10)
        safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)])
    safe_code = safe_code.replace('-', '')
    return safe_code


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--theme', type=str)
    parser.add_argument('--live', action='store_true')
    parser.add_argument('--share', action='store_true')
    parser.add_argument('--port', type=int)
    parser.add_argument('--disable-queue',
                        dest='enable_queue',
                        action='store_false')
    parser.add_argument('--allow-flagging', type=str, default='never')
    parser.add_argument('--allow-screenshot', action='store_true')
    return parser.parse_args()


def run(
    image,
    cartoonize: Cartoonize
) -> tuple[PIL.Image.Image]:

    out_path = compress_UUID() + '.png'
    cartoonize.run_sigle(image.name, out_path)

    return PIL.Image.open(out_path)


def main():
    gr.close_all()

    args = parse_args()

    cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wbc/saved_models/'))

    func = functools.partial(run, cartoonize=cartoonize)
    func = functools.update_wrapper(func, run)

    gr.Interface(
        func,
        [
            gr.inputs.Image(type='file', label='Input Image'),
        ],
        [
            gr.outputs.Image(
                type='pil',
                label='Result'),
        ],
        # examples=examples,
        theme=args.theme,
        title=TITLE,
        description=DESCRIPTION,
        article=ARTICLE,
        allow_screenshot=args.allow_screenshot,
        allow_flagging=args.allow_flagging,
        live=args.live,
    ).launch(
        enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == '__main__':
    main()
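
The compress_UUID helper above packs three hex digits (12 bits) into two symbols from a 64-character alphabet. A quick check of the resulting ID shape, using only the function defined above:

ids = {compress_UUID() for _ in range(5)}
for i in ids:
    # usually 20 chars; the final replace('-', '') strips any dashes
    # drawn from the alphabet, so a code can occasionally be shorter
    print(len(i), i)
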
spaces/Audio-AGI/WavJourney/add_voice_preset.py
DELETED
@@ -1,21 +0,0 @@
-import argparse
-import voice_presets
-
-def main():
-    # Argument Parsing
-    parser = argparse.ArgumentParser(description="Add Voice Preset")
-    parser.add_argument("--id", required=True, help="ID of the voice")
-    parser.add_argument("--desc", required=True, help="Description of the voice")
-    parser.add_argument("--wav-path", required=True, help="Path to the .wav file")
-    parser.add_argument("--session-id", required=True, help="session_id, if set to '' then it's system voice presets")
-    args = parser.parse_args()
-
-    if args.session_id:
-        print(voice_presets.add_session_voice_preset(args.id, args.desc, args.wav_path, args.session_id))
-    else:
-        print(voice_presets.add_system_voice_preset(args.id, args.desc, args.wav_path))
-
-
-
-if __name__ == "__main__":
-    main()
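For context, the two voice_presets calls this CLI wraps can also be invoked directly; a minimal sketch (the preset id, description, and wav path below are made-up examples, and voice_presets must be importable):

import voice_presets

# System-wide preset, equivalent to running the script with --session-id ''
print(voice_presets.add_system_voice_preset('narrator', 'calm male narrator', 'assets/narrator.wav'))

# Session-scoped preset, equivalent to passing a non-empty --session-id
print(voice_presets.add_session_voice_preset('narrator', 'calm male narrator', 'assets/narrator.wav', 'session-42'))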
spaces/Banbri/zcvzcv/src/lib/computeSecretFingerprint.ts
DELETED
@@ -1,7 +0,0 @@
-import { computeSha256 } from "./computeSha256"
-
-const secretFingerprint = `${process.env.SECRET_FINGERPRINT || ""}`
-
-export function computeSecretFingerprint(input: string) {
-  return computeSha256(`${secretFingerprint}_${input}`)
-}
spaces/Bart92/RVC_HF/infer/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
DELETED
@@ -1,91 +0,0 @@
-import numpy as np
-import pyworld
-
-from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-
-
-class DioF0Predictor(F0Predictor):
-    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
-        self.hop_length = hop_length
-        self.f0_min = f0_min
-        self.f0_max = f0_max
-        self.sampling_rate = sampling_rate
-
-    def interpolate_f0(self, f0):
-        """
-        Interpolate the F0 contour across unvoiced (zero-valued) frames.
-        """
-
-        data = np.reshape(f0, (f0.size, 1))
-
-        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
-        vuv_vector[data > 0.0] = 1.0
-        vuv_vector[data <= 0.0] = 0.0
-
-        ip_data = data
-
-        frame_number = data.size
-        last_value = 0.0
-        for i in range(frame_number):
-            if data[i] <= 0.0:
-                j = i + 1
-                for j in range(i + 1, frame_number):
-                    if data[j] > 0.0:
-                        break
-                if j < frame_number - 1:
-                    if last_value > 0.0:
-                        step = (data[j] - data[i - 1]) / float(j - i)
-                        for k in range(i, j):
-                            ip_data[k] = data[i - 1] + step * (k - i + 1)
-                    else:
-                        for k in range(i, j):
-                            ip_data[k] = data[j]
-                else:
-                    for k in range(i, frame_number):
-                        ip_data[k] = last_value
-            else:
-                ip_data[i] = data[i]  # possibly an unnecessary copy here
-                last_value = data[i]
-
-        return ip_data[:, 0], vuv_vector[:, 0]
-
-    def resize_f0(self, x, target_len):
-        source = np.array(x)
-        source[source < 0.001] = np.nan
-        target = np.interp(
-            np.arange(0, len(source) * target_len, len(source)) / target_len,
-            np.arange(0, len(source)),
-            source,
-        )
-        res = np.nan_to_num(target)
-        return res
-
-    def compute_f0(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.dio(
-            wav.astype(np.double),
-            fs=self.sampling_rate,
-            f0_floor=self.f0_min,
-            f0_ceil=self.f0_max,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-        for index, pitch in enumerate(f0):
-            f0[index] = round(pitch, 1)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
-    def compute_f0_uv(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.dio(
-            wav.astype(np.double),
-            fs=self.sampling_rate,
-            f0_floor=self.f0_min,
-            f0_ceil=self.f0_max,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-        for index, pitch in enumerate(f0):
-            f0[index] = round(pitch, 1)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))
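A minimal usage sketch of the predictor above — illustrative code, not from the repository; it assumes numpy and pyworld are installed and uses a synthetic 220 Hz tone:

import numpy as np
from infer.lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

sr = 16000
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)       # one second of a 220 Hz tone

predictor = DioF0Predictor(hop_length=160, sampling_rate=sr)
f0 = predictor.compute_f0(wav)                   # length == len(wav) // hop_length
f0_interp, vuv = predictor.compute_f0_uv(wav)    # F0 plus a voiced/unvoiced mask
print(f0.shape, float(np.median(f0[f0 > 0])))    # median F0 should land near 220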
spaces/Bart92/RVC_HF/infer/lib/infer_pack/onnx_inference.py
DELETED
@@ -1,149 +0,0 @@
-import librosa
-import numpy as np
-import onnxruntime
-import soundfile
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-class ContentVec:
-    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
-        logger.info("Load model(s) from {}".format(vec_path))
-        if device == "cpu" or device is None:
-            providers = ["CPUExecutionProvider"]
-        elif device == "cuda":
-            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-        elif device == "dml":
-            providers = ["DmlExecutionProvider"]
-        else:
-            raise RuntimeError("Unsupported Device")
-        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
-    def __call__(self, wav):
-        return self.forward(wav)
-
-    def forward(self, wav):
-        feats = wav
-        if feats.ndim == 2:  # double channels
-            feats = feats.mean(-1)
-        assert feats.ndim == 1, feats.ndim
-        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
-        onnx_input = {self.model.get_inputs()[0].name: feats}
-        logits = self.model.run(None, onnx_input)[0]
-        return logits.transpose(0, 2, 1)
-
-
-def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
-    if f0_predictor == "pm":
-        from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
-        f0_predictor_object = PMF0Predictor(
-            hop_length=hop_length, sampling_rate=sampling_rate
-        )
-    elif f0_predictor == "harvest":
-        from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
-            HarvestF0Predictor,
-        )
-
-        f0_predictor_object = HarvestF0Predictor(
-            hop_length=hop_length, sampling_rate=sampling_rate
-        )
-    elif f0_predictor == "dio":
-        from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
-        f0_predictor_object = DioF0Predictor(
-            hop_length=hop_length, sampling_rate=sampling_rate
-        )
-    else:
-        raise Exception("Unknown f0 predictor")
-    return f0_predictor_object
-
-
-class OnnxRVC:
-    def __init__(
-        self,
-        model_path,
-        sr=40000,
-        hop_size=512,
-        vec_path="vec-768-layer-12",
-        device="cpu",
-    ):
-        vec_path = f"pretrained/{vec_path}.onnx"
-        self.vec_model = ContentVec(vec_path, device)
-        if device == "cpu" or device is None:
-            providers = ["CPUExecutionProvider"]
-        elif device == "cuda":
-            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-        elif device == "dml":
-            providers = ["DmlExecutionProvider"]
-        else:
-            raise RuntimeError("Unsupported Device")
-        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
-        self.sampling_rate = sr
-        self.hop_size = hop_size
-
-    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
-        onnx_input = {
-            self.model.get_inputs()[0].name: hubert,
-            self.model.get_inputs()[1].name: hubert_length,
-            self.model.get_inputs()[2].name: pitch,
-            self.model.get_inputs()[3].name: pitchf,
-            self.model.get_inputs()[4].name: ds,
-            self.model.get_inputs()[5].name: rnd,
-        }
-        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
-    def inference(
-        self,
-        raw_path,
-        sid,
-        f0_method="dio",
-        f0_up_key=0,
-        pad_time=0.5,
-        cr_threshold=0.02,
-    ):
-        f0_min = 50
-        f0_max = 1100
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-        f0_predictor = get_f0_predictor(
-            f0_method,
-            hop_length=self.hop_size,
-            sampling_rate=self.sampling_rate,
-            threshold=cr_threshold,
-        )
-        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
-        org_length = len(wav)
-        if org_length / sr > 50.0:
-            raise RuntimeError("Reached Max Length")
-
-        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
-
-        hubert = self.vec_model(wav16k)
-        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
-        hubert_length = hubert.shape[1]
-
-        pitchf = f0_predictor.compute_f0(wav, hubert_length)
-        pitchf = pitchf * 2 ** (f0_up_key / 12)
-        pitch = pitchf.copy()
-        f0_mel = 1127 * np.log(1 + pitch / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        pitch = np.rint(f0_mel).astype(np.int64)
-
-        pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
-        pitch = pitch.reshape(1, len(pitch))
-        ds = np.array([sid]).astype(np.int64)
-
-        rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
-        hubert_length = np.array([hubert_length]).astype(np.int64)
-
-        out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
-        out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
-        return out_wav[0:org_length]
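For orientation, a hypothetical end-to-end call into this class — the model path and input file below are placeholders, and the ONNX files under pretrained/ must exist locally for this to run:

import soundfile
from infer.lib.infer_pack.onnx_inference import OnnxRVC

model = OnnxRVC(
    "weights/my_voice.onnx",      # made-up path to an exported RVC model
    sr=40000,
    hop_size=512,
    vec_path="vec-768-layer-12",  # resolved to pretrained/vec-768-layer-12.onnx
    device="cpu",
)
out = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)  # int16 samples
soundfile.write("output.wav", out, 40000)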
spaces/Benson/text-generation/Examples/Apk Chicos Tropiezo Para Ipad.md
DELETED
@@ -1,63 +0,0 @@
-
-<h1>How to Download and Install Stumble Guys APK for iPad</h1>
-<p>Stumble Guys is a massively multiplayer party elimination game that has become very popular among players who love chaotic, hilarious obstacle races. The game is inspired by the hit TV show Wipeout and the PC game Fall Guys. You can join up to 32 players online and compete across various levels until one victor is crowned.</p>
-<h2>stumble guys apk for ipad</h2><br /><p><b><b>Download File</b> ····· <a href="https://bltlly.com/2v6K8Q">https://bltlly.com/2v6K8Q</a></b></p><br /><br />
-<p>Stumble Guys is available on the App Store for iPhone and iPad users, but what if you want to play the game using an APK file? APK stands for Android Package, a file format Android devices use to install apps and games. APK files are usually downloaded from external sources that are not authorized by Google or Apple. This means you can access apps that are not available in the official app stores, or try beta versions of apps before their release.</p>
-<p>However, installing APK files on iOS devices is not as easy as on Android devices. iOS devices have a different file system and security model that prevents unauthorized apps from running. Therefore, you need to use some tricks and tools to install APK files on your iPad. In this article, we will show you two methods to download and install Stumble Guys APK for iPad.</p>
-<h2>Method 1: Jailbreak Your iOS Device</h2>
-<p>Jailbreaking your iOS device means modifying its software to remove some of the restrictions imposed by Apple. This way, you can install custom apps, themes, tweaks and more on your device. However, jailbreaking also comes with risks, such as voiding your warranty, exposing your device to malware, or bricking your device if something goes wrong.</p>
-<p></p>
-<p>If you decide to jailbreak your iOS device, follow these steps:</p>
-<ol>
-<li>Find a reliable jailbreak tool that supports your iOS version and device model. Some popular jailbreak tools are unc0ver, checkra1n, Chimera and Odyssey.</li>
-
-<li>Once the jailbreak process is complete, you should see a new app called Cydia on your device. Cydia is an app store for jailbroken devices that lets you download and install various apps and tweaks.</li>
-<li>Open Cydia and search for Stumble Guys APK. You should find several sources offering the APK file for download. Choose a trustworthy source and tap Install.</li>
-<li>Wait for the installation to finish, then launch Stumble Guys from the home screen.</li>
-</ol>
-<h2>Method 2: Find the IPA Equivalent of the Stumble Guys APK</h2>
-<p>If you don't want to jailbreak your iOS device, you can try another method that involves finding the IPA equivalent of the Stumble Guys APK. IPA stands for iOS App Store Package, a file format iOS devices use to install apps and games. IPA files are usually downloaded from the official App Store, but you can also find them from other sources online. However, you need to use a tool called Cydia Impactor to install IPA files on your iOS device. Cydia Impactor is a program that lets you sideload apps onto your iOS device without jailbreaking.</p>
-<p>If you want to use this method, follow these steps:</p>
-<ol>
-<li>Find and download the IPA version of the Stumble Guys APK. You can search online for websites that offer IPA files for download. Some popular ones are iOS Ninja, AppCake and Panda Helper.</li>
-<li>Download Cydia Impactor on your computer and extract the zip file.</li>
-<li>Connect your iOS device to your computer via a USB cable and launch Cydia Impactor.</li>
-<li>Drag and drop the Stumble Guys IPA file into the Cydia Impactor window.</li>
-<li>Enter your Apple ID and password when prompted. This is needed to sign the app with a certificate that allows it to run on your device.</li>
-<li>Wait for the installation to finish, then disconnect your device from your computer.</li>
-
-<li>Launch Stumble Guys from the home screen and enjoy the game.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>Stumble Guys is a fun and addictive game that you can play on your iPad using an APK file or an IPA file. Both methods have their pros and cons, so you can choose the one that suits you best. However, you should always be careful when downloading files from unknown sources, as they may contain viruses or malware that can harm your device.</p>
-<p>If you want to play Stumble Guys on your iPad, here are some tips and tricks that can help you win the game:</p>
-<ul>
-<li>Learn the layout of each level and memorize the best routes and shortcuts.</li>
-<li>Avoid crowded areas and try to stay away from other players who can push or grab you.</li>
-<li>Use the jump and dive buttons wisely to clear obstacles and gaps.</li>
-<li>Collect coins and gems to unlock new skins and outfits for your character.</li>
-<li>Have fun and don't give up if you fail. You can always try again in the next round.</li>
-</ul>
-<p>If you are ready to join the ultimate elimination challenge, you can download Stumble Guys from the link below. Have fun and good luck!</p>
-<h3>Download Stumble Guys APK for iPad</h3>
-<h2>FAQ</h2>
-<h4>What are the features of the Stumble Guys game?</h4>
-<p>Stumble Guys has many features that make it enjoyable and exciting, such as:</p>
-<ul>
-<li>Up to 32 players online in each match</li>
-<li>Different levels with unique obstacles and challenges</li>
-<li>Cute, colorful graphics and animations</li>
-<li>Fun sound effects and music</li>
-<li>Customizable characters with various skins and outfits</li>
-<li>In-game chat and emotes</li>
-</ul>
-<h4>Is Stumble Guys free to play?</h4>
-
-<h4>Can I play Stumble Guys with my friends online?</h4>
-<p>Yes, you can play Stumble Guys with your friends online by creating or joining a private room. To create a private room, tap the Create Room button in the main menu and choose a level. Then share the room code with your friends so they can join you. To join a private room, tap the Join Room button in the main menu and enter the room code provided by your friend.</p>
-<h4>What are the system requirements for Stumble Guys on iOS devices?</h4>
-<p>To play Stumble Guys on iOS devices, you need an iPhone or iPad running iOS 10 or later. You also need at least 200 MB of free storage space on your device. The game is compatible with most iOS devices, but some older models may experience lag or crashes.</p>
-<h4>How can I contact the developers of Stumble Guys?</h4>
-<p>If you have any questions, feedback or suggestions for Stumble Guys, you can contact the developers by emailing [email protected]. You can also follow them on their social media accounts for updates, news and tips about the game. Here are their social media links: - Facebook: https://www.facebook.com/kitkagames - Twitter: https://twitter.com/kitkagames - Instagram: https://www.instagram.com/kitkagames - YouTube: https://www.youtube.com/channel/UC4vqf2l6ZwWQkDZQJv7c5UA I hope you enjoyed this article and learned how to download and install Stumble Guys APK for iPad. If you did, please share it with your friends and leave a comment below. Thanks for reading and have a great day!</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Asfalto 8 - Juego De Carreras De Coches Apk.md
DELETED
@@ -1,80 +0,0 @@
-<br />
-<h1>Super Bear Adventure Mod APK: All Unlocked Latest Version</h1>
-<p>If you like 3D platformer games inspired by the classic '90s, you’ll love Super Bear Adventure. It is a fun and exciting game where you can explore 6 open worlds, discover their secrets, talk with the inhabitants of the kingdom, collect coins, unlock hats, fight against your enemies and free your friends. But if you want to enjoy all the content of the game without limitations, we recommend that you download the APK mod of Super Bear Adventure, which allows you to have everything unlocked in the latest version. In this article, we tell you everything you need to know about this APK mod, how to download and install it, its advantages and disadvantages, and the most frequently asked questions that may arise.</p>
-<h2>asphalt 8 - car racing game apk</h2><br /><p><b><b>Download</b> 🗸🗸🗸 <a href="https://bltlly.com/2v6K0M">https://bltlly.com/2v6K0M</a></b></p><br /><br />
-<h2>What is Super Bear Adventure?</h2>
-<p>Super Bear Adventure is a 3D platform game developed by Earthkwak Games, an independent studio that is inspired by Nintendo games like Super Mario 64, Banjo-Kazooie or Donkey Kong 64. The game has colorful and detailed graphics, cheerful and catchy music, and smooth and simple gameplay. The protagonist is a bear named Teddy, who must travel 6 different worlds to find his friends kidnapped by the evil Crocco. Each world has its own theme, such as forest, desert, castle or space, and is full of secrets, collectibles, characters and enemies. The game has a retro and nostalgic style, but also incorporates modern elements such as side missions, minigames, vehicles or hats that give special skills to Teddy.</p>
-<h3>Game Features</h3>
-<p>Super Bear Adventure is a game that offers many hours of fun and entertainment. Some of its highlights, listed below, include:</p>
-<ul>
-<li>6 open worlds to explore with more than 50 levels.</li>
-
-<li>More than 20 hats to unlock and customize Teddy.</li>
-<li>More than 10 vehicles to drive and fly.</li>
-<li>More than 15 types of enemies to face.</li>
-<li>More than 10 final bosses to defeat.</li>
-<li>More than 30 characters to interact with and help.</li>
-<li>More than 20 minigames to play and win prizes.</li>
-<li>A system of achievements and rankings online.</li>
-<li>A local multiplayer mode to play with your friends.</li>
-</ul>
-<h2>Why download the APK mod?</h2>
-<p>Although Super Bear Adventure is a free game, it has some elements that require purchases integrated with real money. For example, to unlock all hats, vehicles or worlds, you need to spend coins or gems that you get by playing or watching ads. In addition, the game has some ads that can be annoying or disrupt the gaming experience. So if you want to enjoy the game to the fullest without restrictions or interruptions, we recommend that you download the APK mod of Super Bear Adventure, which allows you to have everything unlocked in the latest version. With this APK mod, you can access all the contents of the game without spending money or seeing ads. In addition, you can enjoy some additional advantages, such as greater speed, better optimization and greater compatibility with different devices. The Super Bear Adventure APK mod is secure, easy to install and requires no root or special permissions.</p>
-<h2>How to download and install Super Bear Adventure Mod APK</h2>
-<p>If you want to download and install the Super Bear Adventure APK mod, just follow these simple steps:</p>
-<p></p>
-<h3>System requirements</h3>
-<p>Before downloading the APK mod, make sure your device meets the following minimum requirements:</p>
-<ul>
-<li>Operating system: Android 4.4 or higher.</li>
-<li>RAM: 1 GB or more.</li>
-<li>Storage space: 100 MB or more.</li>
-
-</ul>
-<h3>Installation steps</h3>
-<p>Now that you know the system requirements, you can proceed to the installation of the APK mod by following these steps:</p>
-<ol>
-<li>Download the mod APK file from a reliable and secure website. You can use the following link to download it: <a href="">Super Bear Adventure Mod APK All Unlocked Last Version</a>.</li>
-<li>Once the APK file is downloaded, place it in the download folder of your device or any other folder you prefer.</li>
-<li>Before installing the APK file, you must enable the "Unknown Sources" option on your device. To do this, go to Settings > Security and select the appropriate box.</li>
-<li>Now, open the APK file and click on the "Install" button. Wait a few seconds for the installation to complete.</li>
-<li>Once the APK mod is installed, you can open the game from the application menu or from the icon that will be created on your home screen.</li>
-<li>Ready! Now you can enjoy Super Bear Adventure with everything unlocked and without ads.</li>
-</ol>
-<h2>Advantages and disadvantages of Super Bear Adventure Mod APK</h2>
-<p>Like everything else, the Super Bear Adventure APK mod has its pros and cons. Below, we show you some of its advantages and disadvantages so you can decide whether it is worth downloading or not.</p>
-<h3>Advantages</h3>
-<p>Some of the advantages of using the APK mod are:</p>
-<ul>
-<li>You have access to all game content without paying or seeing ads.</li>
-<li>You can customize Teddy with all the hats you want.</li>
-<li>You can drive and fly with all the vehicles in the game.</li>
-<li>You can explore all worlds and levels without restrictions.</li>
-<li>You can play with greater speed and fluidity.</li>
-<li>You can install the game on any device compatible with Android 4.4 or higher.</li>
-</ul>
-<h3>Disadvantages</h3>
-
-<ul>
-<li>You can lose the fun and challenge of getting coins and collectibles yourself.</li>
-<li>You may have problems with updates to the original game or synchronization with your Google Play Games account.</li>
-<li>You can run the risk of infecting your device with some virus or malware if you download the APK file from an unreliable or malicious website.</li>
-</ul>
-<h2>Super Bear Adventure Mod APK FAQ</h2>
-<p>Here we answer some of the most frequently asked questions about the Super Bear Adventure APK mod:</p>
-<h4>Is it legal to use the APK mod?</h4>
-<p>There is no definitive answer to this question, as it depends on the laws and regulations of each country or region on the use of modified applications. It is generally considered that using the APK mod is not illegal, as long as it is for personal, non-commercial use, and it does not infringe the copyright or intellectual property of the original developer. However, the developer may be able to take legal action against users who use the APK mod if it considers that this affects its revenue or reputation. Therefore, we recommend that you use the APK mod at your own risk and respect the work of the original developer.</p>
-<h4>Is it safe to use the APK mod?</h4>
-<p>The Super Bear Adventure APK mod is safe to use as long as you download it from a reliable and secure website, such as the one we have provided in this article. The APK file has been scanned and verified by various antivirus tools and contains no viruses, malware or spyware that could damage your device or compromise your privacy. In addition, the APK mod does not require root or special permissions to run, so it does not alter your device's operating system or affect its performance or security.</p>
-<h4>Can I play online with the APK mod?</h4>
-
-<h4>Can I update the APK mod?</h4>
-<p>No, you cannot update the Super Bear Adventure APK mod from the official Google Play store or from the original game. If you do, you will lose all the features of the APK mod and return to the original version of the game. To update the APK mod, you must wait for a new version of the mod to come out and download and install it following the same steps we have indicated above. We recommend that you regularly visit the website where you downloaded the APK mod to stay on top of the latest updates and news.</p>
-<h4>Can I sync my progress with my Google Play Games account?</h4>
-<p>No, you cannot synchronize your progress with your Google Play Games account if you use the Super Bear Adventure APK mod. This is because the APK mod has a different code than the original game and is not compatible with Google Play services. If you want to synchronize your progress with your Google Play Games account, you must use the original version of the game and give up the advantages of the APK mod.</p>
-<h2>Conclusion</h2>
-
-
-<h2></h2></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descargar 50 Cent 21.md
DELETED
@@ -1,108 +0,0 @@
-
-<h1>Download 50 Cent 21: How to Enjoy the Classic Hip Hop Song for Free</h1>
-<p>If you are a hip hop fan, you probably know the song "21 Questions" by 50 Cent featuring Nate Dogg. It is one of the rapper's most popular songs; he rose to fame with his debut album Get Rich or Die Tryin' in 2003. The song is a romantic, catchy tune that asks a series of questions to test a partner's loyalty and love.</p>
-<p>But how can you download this song for free and legally? And where can you stream it online if you don't want to download it? In this article, we will answer these questions and show you how to enjoy this classic hip hop song without breaking the law or spending a dime.</p>
-<h2>download 50 cent 21</h2><br /><p><b><b>DOWNLOAD</b> ⭐ <a href="https://bltlly.com/2v6Mmd">https://bltlly.com/2v6Mmd</a></b></p><br /><br />
-<h2>What is 50 Cent 21?</h2>
-<p>"21 Questions" is the second single from Get Rich or Die Tryin', released in March 2003 by Interscope Records, Dr. Dre's Aftermath Entertainment, Eminem's Shady Records and 50 Cent's own G-Unit Records. The song was produced by Dirty Swift and features vocals by Nate Dogg, a prominent singer and rapper in the West Coast hip hop scene.</p>
-<p>The song samples "It's Only Love Doing Its Thing" by Barry White, which gives it a smooth, soulful vibe. The lyrics are based on a game 50 Cent used to play with his girlfriends, asking them 21 questions to see whether they would stay with him through thick and thin.</p>
-<p>Some of the questions include:</p>
-<ul>
-<li>Would you love me if I was down and out?</li>
-<li>If I didn't smell so good would you still hug me?</li>
-<li>If I got locked up and sentenced to a quarter century, could I count on you to support me mentally?</li>
-<li>If I went back to a hoopty from a Benz, would you poof and disappear like some of my friends?</li>
-<li>If I was hit and I was hurt would you be by my side?</li>
-</ul>
-<h2>Why is 50 Cent 21 a hit?</h2>
-
-<p>There are several reasons why this song is a hit:</p>
-<h3>The catchy hook and chorus</h3>
-<p>The song's hook is simple but memorable: "I just wanna chill and twist one up / Catch stunts in my seven-forty-five / You drive me crazy shorty I / Need to see you and feel you by my side". It sets the mood for the song, and the chorus is catchy and easy to sing along to: "Girl, it's easy to love me now / Would you love me if I was down and out? / Would you still have love for me?"</p>
-<h3>The collaboration with Nate Dogg</h3>
-<p>Nate Dogg was a legendary singer and rapper known for his smooth, distinctive voice and his collaborations with many hip hop artists, such as Dr. Dre, Snoop Dogg, Warren G, Eminem and Ludacris. He was also a member of the group 213 with Snoop Dogg and Warren G. He passed away in 2011 due to complications from multiple strokes.</p>
-<p>Nate Dogg sings the chorus of "21 Questions" and adds his own flavor to the song. His voice complements 50 Cent's rapped verses and creates a contrast between the rough and the smooth. He also adds some humor to the song with lines like "If I fell off tomorrow would you still love me? / If I didn't smell so good would you still hug me?"</p>
-<h3>The lyrics and the theme</h3>
-<p>The lyrics of "21 Questions" are not only witty and clever but also relatable and honest. They express the insecurities and doubts many people have in relationships, especially when they are not sure whether their partner is loyal and faithful. They also reflect the reality of 50 Cent's life, as he was shot nine times in 2000 and survived a near-fatal attack.</p>
-<p></p>
-<p>The theme of the song is universal and timeless: love. The song is about finding someone who loves you unconditionally, no matter what happens or what you have. It is about asking the hard questions and getting honest answers. It is about trusting and respecting your partner and being loyal and faithful to them.</p>
-
-<p>Now that you know more about the song, you may be wondering how to download it for free and legally. There are many benefits to downloading music legally, such as:</p>
-<ul>
-<li>You avoid breaking the law and being fined or sued by the music industry.</li>
-<li>You support the artists and their work by respecting their rights and royalties.</li>
-<li>You get high-quality audio files that are virus-free and safe to download.</li>
-<li>You can enjoy your music offline without relying on an internet connection or a data plan.</li>
-</ul>
-<p>There are many websites that offer free music downloads legally, such as:</p>
-<h3>Bandcamp</h3>
-<p>Bandcamp is a platform that lets artists upload and sell their music directly to their fans. You can find a lot of independent and underground music on Bandcamp, as well as some mainstream artists. You can stream any song for free on Bandcamp, but you can also download some songs for free or for a price you choose. You can also buy albums or merch from the artists.</p>
-<p>To download 50 Cent 21 from Bandcamp, you can go to this link: [text]. You can name your price or enter zero to download it for free. You can also choose the file format, such as MP3, FLAC or WAV.</p>
-<h3>Jamendo</h3>
-<p>Jamendo is a website that offers free music downloads from artists who want to share their music with the world. You can find a variety of genres and styles on Jamendo, from pop, rock and jazz to hip hop. You can stream any song for free on Jamendo, but you can also download some songs for free or for a small fee. You can also support the artists by donating or buying their albums.</p>
-<p>To download 50 Cent 21 from Jamendo, you can go to this link: [text]. You can download it for free by clicking the download icon next to the song title. You can also choose the file quality, such as 128 kbps or 320 kbps.</p>
-<h3>Internet Archive</h3>
-
-<p>To download 50 Cent 21 from Internet Archive, you can go to this link: [text]. You can download it for free by clicking one of the "Download Options", such as MP3 or OGG Vorbis.</p>
-<h2>How to stream 50 Cent 21 online</h2>
-<p>If you don't want to download the song, you can also stream it online. There are many advantages to streaming music online, such as:</p>
-<ul>
-<li>You can access a large library of songs and playlists across different genres and artists.</li>
-<li>You can discover new music and recommendations based on your preferences and listening history.</li>
-<li>You can save storage space on your device and avoid cluttering your files.</li>
-<li>You can enjoy high-quality audio and video streaming without interruptions or buffering.</li>
-</ul>
-<p>There are many music streaming services you can use to listen to 50 Cent 21 online, such as:</p>
-<h3>Spotify</h3>
-<p>Spotify is one of the most popular and widely used music streaming services in the world. It has over 70 million songs and podcasts that you can stream for free or with a premium subscription. You can create your own playlists, follow your favorite artists and share your music with your friends. You can also listen to playlists, radio stations and podcasts on Spotify.</p>
-<p>To stream 50 Cent 21 on Spotify, you can go to this link: [text]. You can play it for free with ads, or without ads with a premium subscription. You can also add it to your library or a playlist by clicking the heart icon or the plus icon.</p>
-<h3>Apple Music</h3>
-
-<p>To stream 50 Cent 21 on Apple Music, you can go to this link: [text]. You can play it with a subscription or with a free trial. You can also add it to your library or a playlist by clicking the plus icon or the cloud icon.</p>
-<h3>YouTube Music</h3>
-<p>YouTube Music is a music streaming service that works with YouTube. It has over 60 million songs and videos that you can stream for free or with a premium subscription. You can also access your own music library that you have uploaded to YouTube. You can create your own playlists, follow your favorite artists and share your music with your friends. You can also listen to playlists, radio stations and podcasts on YouTube Music.</p>
-<p>To stream 50 Cent 21 on YouTube Music, you can go to this link: [text]. You can play it for free with ads, or without ads with a premium subscription. You can also add it to your library or a playlist by clicking the plus icon or the check icon.</p>
-<h2>How to support 50 Cent and his music</h2>
-<p>If you love 50 Cent and his music, you may want to support him and his work. There are many ways to do so, such as:</p>
-<ul>
-<li>Buying his albums or singles from his official website or online stores.</li>
-<li>Donating to his charities or causes he supports.</li>
-<li>Buying his merchandise from his official website or online stores.</li>
-<li>Following him on his social media accounts and engaging with his posts.</li>
-<li>Attending his concerts or events when they are available.</li>
-</ul>
-<p>Here are some links to help you support 50 Cent and his music:</p>
-<h3>Official website</h3>
-<p>You can visit his official website at [text]. You can find his latest news, music, videos, photos, tour dates, merchandise and more.</p>
-<h3>Social media</h3>
-<p>You can follow him on his social media accounts at:</p>
-<ul>
-<li>Facebook: [text]</li>
-<li>Twitter: [text]</li>
-<li>Instagram: [text]</li>
-<li>YouTube: [text]</li>
-</ul>
-<h3>Merchandise</h3>
-
-<ul>
-<li>G-Unit: [text]</li>
-<li>Effen Vodka: [text]</li>
-<li>SMS Audio: [text]</li>
-</ul>
-<h1>Conclusion</h1>
-<p>In this article, we have shown you how to download 50 Cent 21 for free and legally, how to stream it online, and how to support 50 Cent and his music. We hope you enjoyed this article and learned something new. Now you can enjoy this classic hip hop song anytime and anywhere you want.</p>
-<p>If you liked this article, please share it with your friends and family who might be interested in this topic. Also, feel free to leave a comment below and let us know what you think. We would love to hear from you and answer any questions you may have.</p>
-<h2>FAQ</h2>
-<p>Here are some frequently asked questions about 50 Cent 21 and their answers:</p>
-<h3>Q: When was 50 Cent 21 released?</h3>
-<p>A: 50 Cent 21 was released on March 4, 2003 as the second single from his debut album Get Rich or Die Tryin'.</p>
-<h3>Q: Who sings the chorus of 50 Cent 21?</h3>
-<p>A: The chorus of 50 Cent 21 is sung by Nate Dogg, a famous singer and rapper in the West Coast hip hop scene. He passed away in 2011 due to complications from multiple strokes.</p>
-<h3>Q: What is the sample used in 50 Cent 21?</h3>
-<p>A: The sample used in 50 Cent 21 is "It's Only Love Doing Its Thing" by Barry White, a soulful, romantic song from 1978.</p>
-<h3>Q: How many questions are there in 50 Cent 21?</h3>
-<p>A: There are actually more than 21 questions in the song, as some of them are repeated or rephrased. The exact number depends on how you count them, but it is around 25 to 30.</p>
-<h3>Q: How can I download or stream other songs by 50 Cent?</h3>
-<p>A: You can download or stream other songs by 50 Cent using the same methods and websites we have mentioned in this article. You can also check out his other albums, such as The Massacre, Curtis, Animal Ambition and Street King Immortal.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descargar Gratis Fuego Max Galaxy Store.md
DELETED
@@ -1,54 +0,0 @@
-<br />
-<h1>Download Free Fire MAX from the Galaxy Store</h1>
-<p>If you are a fan of battle royale games, you may have heard of Free Fire, one of the most popular and most downloaded games in the world. But did you know there is a new version of Free Fire that offers a more premium, immersive gaming experience? It is called Free Fire MAX, and it is available for download from the Samsung Galaxy Store. In this article, we will tell you what Free Fire MAX is, why you should download it from the Galaxy Store, and how to do it in a few simple steps.</p>
-<h2>Free Fire MAX features</h2>
-<p>Free Fire MAX is designed exclusively to deliver a premium battle royale experience. It has the same core gameplay as Free Fire, but with improved graphics, special effects and smoother performance. Here are some of the features that make Free Fire MAX stand out:</p>
-<h2>download free fire max galaxy store</h2><br /><p><b><b>Download</b> ··· <a href="https://bltlly.com/2v6Mc3">https://bltlly.com/2v6Mc3</a></b></p><br /><br />
-<h3>Fast-paced, deeply immersive gameplay</h3>
-<p>In Free Fire MAX, you will parachute onto a deserted island with 49 other players and fight for survival. You will have to scavenge for weapons and supplies, hide from enemies, and take down anyone who stands in your way. A match lasts about 10 minutes, and only one player or team will emerge victorious. With redesigned, enhanced graphics, you will feel like you are in the middle of the action.</p>
-<h3>Same game, better experience</h3>
-<p>Free Fire MAX offers HD graphics, enhanced special effects and smoother gameplay that provide a realistic, immersive survival experience. You will be able to see every detail of the environment, from the grass to the buildings, and enjoy stunning effects such as explosions, fire and smoke. You will also experience less lag and more stability while playing.</p>
-<h3>4-man squads with in-game voice chat</h3>
-
-<h3>Firelink technology</h3>
-<p>With Firelink technology, you can log in to your existing Free Fire account to play Free Fire MAX without any hassle. Your progress and items are kept across both apps in real time. You can also play every game mode with Free Fire and Free Fire MAX players together, no matter which app they use.</p>
-<h2>Galaxy Store benefits</h2>
-<p>If you are a Samsung Galaxy user, you can enjoy exclusive benefits by downloading Free Fire MAX from the Galaxy Store. The Galaxy Store is the official app store for Samsung devices, offering games, apps, tools, themes, wallpapers, fonts, watch faces and more. Here are some of the benefits the Galaxy Store offers:</p>
-<h3>Exclusive offers for Galaxy users</h3>
-<p>By downloading games from the Galaxy Store, you can get exclusive rewards, discounts and promotions that are not available elsewhere. For example, you can get a free Galaxy-themed parachute skin by downloading Free Fire MAX from the Galaxy Store. You can also get up to 50% off in-game purchases and bundles. These offers are updated regularly, so be sure to check them out.</p>
-<h3>Customize your Galaxy device</h3>
-<p>With the Galaxy Store, you can customize your Galaxy device to suit your style and preferences. You can download themes, wallpapers, fonts, icons and more to change the look of your device. You can also download tools and utilities that enhance your device's functionality and performance. For example, you can download Game Launcher, a tool that optimizes your device for gaming and lets you access all your games in one place.</p>
-<h3>Premium watch faces and apps</h3>
-
-<h2>How to download Free Fire MAX from the Galaxy Store</h2>
-<p>Downloading Free Fire MAX from the Galaxy Store is quick and easy. Just follow these steps:</p>
-<h3>Step 1: Open the Galaxy Store app on your device</h3>
-<p>If you have a Samsung Galaxy device, you should already have the Galaxy Store app installed. If not, you can download it from the Google Play Store or the Samsung website. Once you have the app, open it and sign in with your Samsung account.</p>
-<p></p>
-<h3>Step 2: Search for Free Fire MAX in the search bar</h3>
-<p>On the Galaxy Store home screen, you will see a search bar at the top. Type "Free Fire MAX" and tap the search icon. You will see the Free Fire MAX app among the search results.</p>
-<h3>Step 3: Tap the install button and wait for the download to complete</h3>
-<p>Tap the Free Fire MAX app icon and you will see a page with more information about the app, such as screenshots, ratings, reviews and a description. You will also see an install button at the bottom. Tap it and accept the permissions required by the app. The app will start downloading to your device. Depending on your internet speed and device storage, this may take a few minutes.</p>
-<h3>Step 4: Launch Free Fire MAX and enjoy the game</h3>
-<p>Once the download is complete, you will see an open button at the bottom of the app page. Tap it and Free Fire MAX will launch on your device. You can also find the app icon on your home screen or in the app drawer. You can now log in with your existing Free Fire account or create a new one. You can also link your Facebook or Google account to sync your progress across devices. Enjoy playing Free Fire MAX with enhanced graphics and features.</p>
-<h2>Conclusion</h2>
-
-<h2>FAQ</h2>
-<h3>What is the difference between Free Fire and Free Fire MAX?</h3>
-<p>Free Fire and Free Fire MAX are battle royale games with the same core gameplay. However, Free Fire MAX has improved graphics, effects and performance that deliver a more premium, immersive experience. Free Fire MAX also has some exclusive features such as 4-man squad mode and in-game voice chat.</p>
-<h3>Can I play Free Fire MAX with Free Fire players?</h3>
-<p>Yes, you can play Free Fire MAX with Free Fire players using Firelink technology. This means you can use your existing Free Fire account to log in to Free Fire MAX and play with your friends who are using either app. You can also play every game mode with both apps together.</p>
-<h3>How much storage space does Free Fire MAX require?</h3>
-<p>Free Fire MAX requires about 1 GB of storage space on your device. However, this may vary depending on your device model and operating system. You may also need additional space for updates and patches.</p>
-<h3>Is Free Fire MAX compatible with my device?</h3>
-<p>Free Fire MAX is compatible with most Android devices that have at least 2 GB of RAM and run Android 4.4 or higher. However, some devices may not support the game due to hardware limitations or compatibility issues. You can check your device's compatibility on the app page in the Galaxy Store before downloading the game.</p>
-<h3>How can I contact customer service if I have problems with Free Fire MAX?</h3>
-<p>If you have any problems with Free Fire MAX, such as bugs, errors, crashes or account issues, you can contact customer service through the following channels:</p>
-<ul>
-<li>In-game: Tap the settings icon in the upper-right corner of the main screen and select "Customer Service". You can submit a ticket describing your problem and attach screenshots if needed.</li>
-<li>Email: Send an email to [email protected] with your problem and your user ID.</li>
-
-</ul>
-<p>Customer service will try to respond to your issue as soon as possible.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/winterm_test.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
|
2 |
-
import sys
|
3 |
-
from unittest import TestCase, main, skipUnless
|
4 |
-
|
5 |
-
try:
|
6 |
-
from unittest.mock import Mock, patch
|
7 |
-
except ImportError:
|
8 |
-
from mock import Mock, patch
|
9 |
-
|
10 |
-
from ..winterm import WinColor, WinStyle, WinTerm


class WinTermTest(TestCase):

    @patch('colorama.winterm.win32')
    def testInit(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 7 + 6 * 16 + 8
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        self.assertEqual(term._fore, 7)
        self.assertEqual(term._back, 6)
        self.assertEqual(term._style, 8)

    @skipUnless(sys.platform.startswith("win"), "requires Windows")
    def testGetAttrs(self):
        term = WinTerm()

        term._fore = 0
        term._back = 0
        term._style = 0
        self.assertEqual(term.get_attrs(), 0)

        term._fore = WinColor.YELLOW
        self.assertEqual(term.get_attrs(), WinColor.YELLOW)

        term._back = WinColor.MAGENTA
        self.assertEqual(
            term.get_attrs(),
            WinColor.YELLOW + WinColor.MAGENTA * 16)

        term._style = WinStyle.BRIGHT
        self.assertEqual(
            term.get_attrs(),
            WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT)

    @patch('colorama.winterm.win32')
    def testResetAll(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 1 + 2 * 16 + 8
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()

        term.set_console = Mock()
        term._fore = -1
        term._back = -1
        term._style = -1

        term.reset_all()

        self.assertEqual(term._fore, 1)
        self.assertEqual(term._back, 2)
        self.assertEqual(term._style, 8)
        self.assertEqual(term.set_console.called, True)

    @skipUnless(sys.platform.startswith("win"), "requires Windows")
    def testFore(self):
        term = WinTerm()
        term.set_console = Mock()
        term._fore = 0

        term.fore(5)

        self.assertEqual(term._fore, 5)
        self.assertEqual(term.set_console.called, True)

    @skipUnless(sys.platform.startswith("win"), "requires Windows")
    def testBack(self):
        term = WinTerm()
        term.set_console = Mock()
        term._back = 0

        term.back(5)

        self.assertEqual(term._back, 5)
        self.assertEqual(term.set_console.called, True)

    @skipUnless(sys.platform.startswith("win"), "requires Windows")
    def testStyle(self):
        term = WinTerm()
        term.set_console = Mock()
        term._style = 0

        term.style(22)

        self.assertEqual(term._style, 22)
        self.assertEqual(term.set_console.called, True)

    @patch('colorama.winterm.win32')
    def testSetConsole(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 0
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        term.windll = Mock()

        term.set_console()

        self.assertEqual(
            mockWin32.SetConsoleTextAttribute.call_args,
            ((mockWin32.STDOUT, term.get_attrs()), {})
        )

    @patch('colorama.winterm.win32')
    def testSetConsoleOnStderr(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 0
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        term.windll = Mock()

        term.set_console(on_stderr=True)

        self.assertEqual(
            mockWin32.SetConsoleTextAttribute.call_args,
            ((mockWin32.STDERR, term.get_attrs()), {})
        )


if __name__ == '__main__':
    main()
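For reference, the attribute arithmetic these tests assert is self-contained enough to sketch outside the test file. The snippet below is illustrative only (not part of the deleted file) and assumes nothing beyond the fore + back * 16 + style encoding visible in the assertions above; pack_attrs and unpack_attrs are hypothetical helper names.

def pack_attrs(fore, back, style):
    # Mirrors WinTerm.get_attrs(): foreground color in the low bits,
    # background color shifted up by a factor of 16, style bits on top.
    return fore + back * 16 + style

def unpack_attrs(attrs):
    # Inverse of the packing asserted in testInit: 7 + 6 * 16 + 8 -> (7, 6, 8)
    fore = attrs & 0x07
    back = (attrs >> 4) & 0x07
    style = attrs & 0x08
    return fore, back, style

assert pack_attrs(7, 6, 8) == 7 + 6 * 16 + 8
assert unpack_attrs(7 + 6 * 16 + 8) == (7, 6, 8)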
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/msvc9compiler.py
DELETED
@@ -1,832 +0,0 @@
"""distutils.msvc9compiler

Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.

The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""

# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
#   finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes

import os
import subprocess
import sys
import re
import warnings

from distutils.errors import (
    DistutilsExecError,
    DistutilsPlatformError,
    CompileError,
    LibError,
    LinkError,
)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform

import winreg

warnings.warn(
    "msvc9compiler is deprecated and slated to be removed "
    "in the future. Please discontinue use or file an issue "
    "with pypa/distutils describing your use case.",
    DeprecationWarning,
)

RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error

HKEYS = (
    winreg.HKEY_USERS,
    winreg.HKEY_CURRENT_USER,
    winreg.HKEY_LOCAL_MACHINE,
    winreg.HKEY_CLASSES_ROOT,
)

NATIVE_WIN64 = sys.platform == 'win32' and sys.maxsize > 2**32
if NATIVE_WIN64:
    # Visual C++ is a 32-bit application, so we need to look in
    # the corresponding registry branch, if we're running a
    # 64-bit Python on Win64
    VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
    WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
    NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
    VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
    WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
    NET_BASE = r"Software\Microsoft\.NETFramework"

# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
    'win32': 'x86',
    'win-amd64': 'amd64',
}


class Reg:
    """Helper class to read values from the registry"""

    def get_value(cls, path, key):
        for base in HKEYS:
            d = cls.read_values(base, path)
            if d and key in d:
                return d[key]
        raise KeyError(key)

    get_value = classmethod(get_value)

    def read_keys(cls, base, key):
        """Return list of registry keys."""
        try:
            handle = RegOpenKeyEx(base, key)
        except RegError:
            return None
        L = []
        i = 0
        while True:
            try:
                k = RegEnumKey(handle, i)
            except RegError:
                break
            L.append(k)
            i += 1
        return L

    read_keys = classmethod(read_keys)

    def read_values(cls, base, key):
        """Return dict of registry keys and values.

        All names are converted to lowercase.
        """
        try:
            handle = RegOpenKeyEx(base, key)
        except RegError:
            return None
        d = {}
        i = 0
        while True:
            try:
                name, value, type = RegEnumValue(handle, i)
            except RegError:
                break
            name = name.lower()
            d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
            i += 1
        return d

    read_values = classmethod(read_values)

    def convert_mbcs(s):
        dec = getattr(s, "decode", None)
        if dec is not None:
            try:
                s = dec("mbcs")
            except UnicodeError:
                pass
        return s

    convert_mbcs = staticmethod(convert_mbcs)


class MacroExpander:
    def __init__(self, version):
        self.macros = {}
        self.vsbase = VS_BASE % version
        self.load_macros(version)

    def set_macro(self, macro, path, key):
        self.macros["$(%s)" % macro] = Reg.get_value(path, key)

    def load_macros(self, version):
        self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
        self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
        self.set_macro("FrameworkDir", NET_BASE, "installroot")
        try:
            if version >= 8.0:
                self.set_macro("FrameworkSDKDir", NET_BASE, "sdkinstallrootv2.0")
            else:
                raise KeyError("sdkinstallrootv2.0")
        except KeyError:
            raise DistutilsPlatformError(
                """Python was built with Visual Studio 2008;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py."""
            )

        if version >= 9.0:
            self.set_macro("FrameworkVersion", self.vsbase, "clr version")
            self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
        else:
            p = r"Software\Microsoft\NET Framework Setup\Product"
            for base in HKEYS:
                try:
                    h = RegOpenKeyEx(base, p)
                except RegError:
                    continue
                key = RegEnumKey(h, 0)
                d = Reg.get_value(base, r"{}\{}".format(p, key))
                self.macros["$(FrameworkVersion)"] = d["version"]

    def sub(self, s):
        for k, v in self.macros.items():
            s = s.replace(k, v)
        return s


def get_build_version():
    """Return the version of MSVC that was used to build Python.

    For Python 2.3 and up, the version number is included in
    sys.version. For earlier versions, assume the compiler is MSVC 6.
    """
    prefix = "MSC v."
    i = sys.version.find(prefix)
    if i == -1:
        return 6
    i = i + len(prefix)
    s, rest = sys.version[i:].split(" ", 1)
    majorVersion = int(s[:-2]) - 6
    if majorVersion >= 13:
        # v13 was skipped and should be v14
        majorVersion += 1
    minorVersion = int(s[2:3]) / 10.0
    # I don't think paths are affected by minor version in version 6
    if majorVersion == 6:
        minorVersion = 0
    if majorVersion >= 6:
        return majorVersion + minorVersion
    # else we don't know what version of the compiler this is
    return None


def normalize_and_reduce_paths(paths):
    """Return a list of normalized paths with duplicates removed.

    The current order of paths is maintained.
    """
    # Paths are normalized so things like:  /a and /a/ aren't both preserved.
    reduced_paths = []
    for p in paths:
        np = os.path.normpath(p)
        # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
        if np not in reduced_paths:
            reduced_paths.append(np)
    return reduced_paths


def removeDuplicates(variable):
    """Remove duplicate values of an environment variable."""
    oldList = variable.split(os.pathsep)
    newList = []
    for i in oldList:
        if i not in newList:
            newList.append(i)
    newVariable = os.pathsep.join(newList)
    return newVariable


def find_vcvarsall(version):
    """Find the vcvarsall.bat file

    At first it tries to find the productdir of VS 2008 in the registry. If
    that fails it falls back to the VS90COMNTOOLS env var.
    """
    vsbase = VS_BASE % version
    try:
        productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir")
    except KeyError:
        log.debug("Unable to find productdir in registry")
        productdir = None

    if not productdir or not os.path.isdir(productdir):
        toolskey = "VS%0.f0COMNTOOLS" % version
        toolsdir = os.environ.get(toolskey, None)

        if toolsdir and os.path.isdir(toolsdir):
            productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
            productdir = os.path.abspath(productdir)
            if not os.path.isdir(productdir):
                log.debug("%s is not a valid directory" % productdir)
                return None
        else:
            log.debug("Env var %s is not set or invalid" % toolskey)
    if not productdir:
        log.debug("No productdir found")
        return None
    vcvarsall = os.path.join(productdir, "vcvarsall.bat")
    if os.path.isfile(vcvarsall):
        return vcvarsall
    log.debug("Unable to find vcvarsall.bat")
    return None


def query_vcvarsall(version, arch="x86"):
    """Launch vcvarsall.bat and read the settings from its environment"""
    vcvarsall = find_vcvarsall(version)
    interesting = {"include", "lib", "libpath", "path"}
    result = {}

    if vcvarsall is None:
        raise DistutilsPlatformError("Unable to find vcvarsall.bat")
    log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
    popen = subprocess.Popen(
        '"{}" {} & set'.format(vcvarsall, arch),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    try:
        stdout, stderr = popen.communicate()
        if popen.wait() != 0:
            raise DistutilsPlatformError(stderr.decode("mbcs"))

        stdout = stdout.decode("mbcs")
        for line in stdout.split("\n"):
            line = Reg.convert_mbcs(line)
            if '=' not in line:
                continue
            line = line.strip()
            key, value = line.split('=', 1)
            key = key.lower()
            if key in interesting:
                if value.endswith(os.pathsep):
                    value = value[:-1]
                result[key] = removeDuplicates(value)

    finally:
        popen.stdout.close()
        popen.stderr.close()

    if len(result) != len(interesting):
        raise ValueError(str(list(result.keys())))

    return result


# More globals
VERSION = get_build_version()
# MACROS = MacroExpander(VERSION)


class MSVCCompiler(CCompiler):
    """Concrete class that implements an interface to Microsoft Visual C++,
    as defined by the CCompiler abstract class."""

    compiler_type = 'msvc'

    # Just set this so CCompiler's constructor doesn't barf. We currently
    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
    # as it really isn't necessary for this sort of single-compiler class.
    # Would be nice to have a consistent interface with UnixCCompiler,
    # though, so it's worth thinking about.
    executables = {}

    # Private class data (need to distinguish C from C++ source for compiler)
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    _rc_extensions = ['.rc']
    _mc_extensions = ['.mc']

    # Needed for the filename generation methods provided by the
    # base class, CCompiler.
    src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions
    res_extension = '.res'
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__(self, verbose=0, dry_run=0, force=0):
        super().__init__(verbose, dry_run, force)
        self.__version = VERSION
        self.__root = r"Software\Microsoft\VisualStudio"
        # self.__macros = MACROS
        self.__paths = []
        # target platform (.plat_name is consistent with 'bdist')
        self.plat_name = None
        self.__arch = None  # deprecated name
        self.initialized = False

    def initialize(self, plat_name=None):  # noqa: C901
        # multi-init means we would need to check platform same each time...
        assert not self.initialized, "don't init multiple times"
        if self.__version < 8.0:
            raise DistutilsPlatformError(
                "VC %0.1f is not supported by this module" % self.__version
            )
        if plat_name is None:
            plat_name = get_platform()
        # sanity check for platforms to prevent obscure errors later.
        ok_plats = 'win32', 'win-amd64'
        if plat_name not in ok_plats:
            raise DistutilsPlatformError(
                "--plat-name must be one of {}".format(ok_plats)
            )

        if (
            "DISTUTILS_USE_SDK" in os.environ
            and "MSSdk" in os.environ
            and self.find_exe("cl.exe")
        ):
            # Assume that the SDK set up everything alright; don't try to be
            # smarter
            self.cc = "cl.exe"
            self.linker = "link.exe"
            self.lib = "lib.exe"
            self.rc = "rc.exe"
            self.mc = "mc.exe"
        else:
            # On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
            # to cross compile, you use 'x86_amd64'.
            # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
            # compile use 'x86' (ie, it runs the x86 compiler directly)
            if plat_name == get_platform() or plat_name == 'win32':
                # native build or cross-compile to win32
                plat_spec = PLAT_TO_VCVARS[plat_name]
            else:
                # cross compile from win32 -> some 64bit
                plat_spec = (
                    PLAT_TO_VCVARS[get_platform()] + '_' + PLAT_TO_VCVARS[plat_name]
                )

            vc_env = query_vcvarsall(VERSION, plat_spec)

            self.__paths = vc_env['path'].split(os.pathsep)
            os.environ['lib'] = vc_env['lib']
            os.environ['include'] = vc_env['include']

            if len(self.__paths) == 0:
                raise DistutilsPlatformError(
                    "Python was built with %s, "
                    "and extensions need to be built with the same "
                    "version of the compiler, but it isn't installed." % self.__product
                )

            self.cc = self.find_exe("cl.exe")
            self.linker = self.find_exe("link.exe")
            self.lib = self.find_exe("lib.exe")
            self.rc = self.find_exe("rc.exe")  # resource compiler
            self.mc = self.find_exe("mc.exe")  # message compiler
            # self.set_path_env_var('lib')
            # self.set_path_env_var('include')

        # extend the MSVC path with the current path
        try:
            for p in os.environ['path'].split(';'):
                self.__paths.append(p)
        except KeyError:
            pass
        self.__paths = normalize_and_reduce_paths(self.__paths)
        os.environ['path'] = ";".join(self.__paths)

        self.preprocess_options = None
        if self.__arch == "x86":
            self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/DNDEBUG']
            self.compile_options_debug = [
                '/nologo',
                '/Od',
                '/MDd',
                '/W3',
                '/Z7',
                '/D_DEBUG',
            ]
        else:
            # Win64
            self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG']
            self.compile_options_debug = [
                '/nologo',
                '/Od',
                '/MDd',
                '/W3',
                '/GS-',
                '/Z7',
                '/D_DEBUG',
            ]

        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
        if self.__version >= 7:
            self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG']
        self.ldflags_static = ['/nologo']

        self.initialized = True

    # -- Worker methods ------------------------------------------------

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        # Copied from ccompiler.py, extended to return .res as 'object'-file
        # for .rc input file
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext(src_name)
            base = os.path.splitdrive(base)[1]  # Chop off the drive
            base = base[os.path.isabs(base) :]  # If abs, chop off leading /
            if ext not in self.src_extensions:
                # Better to raise an exception instead of silently continuing
                # and later complain about sources and targets having
                # different lengths
                raise CompileError("Don't know how to compile %s" % src_name)
            if strip_dir:
                base = os.path.basename(base)
            if ext in self._rc_extensions:
                obj_names.append(os.path.join(output_dir, base + self.res_extension))
            elif ext in self._mc_extensions:
                obj_names.append(os.path.join(output_dir, base + self.res_extension))
            else:
                obj_names.append(os.path.join(output_dir, base + self.obj_extension))
        return obj_names

    def compile(  # noqa: C901
        self,
        sources,
        output_dir=None,
        macros=None,
        include_dirs=None,
        debug=0,
        extra_preargs=None,
        extra_postargs=None,
        depends=None,
    ):

        if not self.initialized:
            self.initialize()
        compile_info = self._setup_compile(
            output_dir, macros, include_dirs, sources, depends, extra_postargs
        )
        macros, objects, extra_postargs, pp_opts, build = compile_info

        compile_opts = extra_preargs or []
        compile_opts.append('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
            if debug:
                # pass the full pathname to MSVC in debug mode,
                # this allows the debugger to find the source file
                # without asking the user to browse for it
                src = os.path.abspath(src)

            if ext in self._c_extensions:
                input_opt = "/Tc" + src
            elif ext in self._cpp_extensions:
                input_opt = "/Tp" + src
            elif ext in self._rc_extensions:
                # compile .RC to .RES file
                input_opt = src
                output_opt = "/fo" + obj
                try:
                    self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            elif ext in self._mc_extensions:
                # Compile .MC to .RC file to .RES file.
                #   * '-h dir' specifies the directory for the
                #     generated include file
                #   * '-r dir' specifies the target directory of the
                #     generated RC file and the binary message resource
                #     it includes
                #
                # For now (since there are no options to change this),
                # we use the source-directory for the include file and
                # the build directory for the RC file and message
                # resources. This works at least for win32all.
                h_dir = os.path.dirname(src)
                rc_dir = os.path.dirname(obj)
                try:
                    # first compile .MC to .RC and .H file
                    self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src])
                    base, _ = os.path.splitext(os.path.basename(src))
                    rc_file = os.path.join(rc_dir, base + '.rc')
                    # then compile .RC to .RES file
                    self.spawn([self.rc] + ["/fo" + obj] + [rc_file])

                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            else:
                # how to handle this file?
                raise CompileError(
                    "Don't know how to compile {} to {}".format(src, obj)
                )

            output_opt = "/Fo" + obj
            try:
                self.spawn(
                    [self.cc]
                    + compile_opts
                    + pp_opts
                    + [input_opt, output_opt]
                    + extra_postargs
                )
            except DistutilsExecError as msg:
                raise CompileError(msg)

        return objects

    def create_static_lib(
        self, objects, output_libname, output_dir=None, debug=0, target_lang=None
    ):

        if not self.initialized:
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        output_filename = self.library_filename(output_libname, output_dir=output_dir)

        if self._need_link(objects, output_filename):
            lib_args = objects + ['/OUT:' + output_filename]
            if debug:
                pass  # XXX what goes here?
            try:
                self.spawn([self.lib] + lib_args)
            except DistutilsExecError as msg:
                raise LibError(msg)
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    def link(  # noqa: C901
        self,
        target_desc,
        objects,
        output_filename,
        output_dir=None,
        libraries=None,
        library_dirs=None,
        runtime_library_dirs=None,
        export_symbols=None,
        debug=0,
        extra_preargs=None,
        extra_postargs=None,
        build_temp=None,
        target_lang=None,
    ):

        if not self.initialized:
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
        (libraries, library_dirs, runtime_library_dirs) = fixed_args

        if runtime_library_dirs:
            self.warn(
                "I don't know what to do with 'runtime_library_dirs': "
                + str(runtime_library_dirs)
            )

        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
        if output_dir is not None:
            output_filename = os.path.join(output_dir, output_filename)

        if self._need_link(objects, output_filename):
            if target_desc == CCompiler.EXECUTABLE:
                if debug:
                    ldflags = self.ldflags_shared_debug[1:]
                else:
                    ldflags = self.ldflags_shared[1:]
            else:
                if debug:
                    ldflags = self.ldflags_shared_debug
                else:
                    ldflags = self.ldflags_shared

            export_opts = []
            for sym in export_symbols or []:
                export_opts.append("/EXPORT:" + sym)

            ld_args = (
                ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]
            )

            # The MSVC linker generates .lib and .exp files, which cannot be
            # suppressed by any linker switches. The .lib files may even be
            # needed! Make sure they are generated in the temporary build
            # directory. Since they have different names for debug and release
            # builds, they can go into the same directory.
            build_temp = os.path.dirname(objects[0])
            if export_symbols is not None:
                (dll_name, dll_ext) = os.path.splitext(
                    os.path.basename(output_filename)
                )
                implib_file = os.path.join(build_temp, self.library_filename(dll_name))
                ld_args.append('/IMPLIB:' + implib_file)

            self.manifest_setup_ldargs(output_filename, build_temp, ld_args)

            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)

            self.mkpath(os.path.dirname(output_filename))
            try:
                self.spawn([self.linker] + ld_args)
            except DistutilsExecError as msg:
                raise LinkError(msg)

            # embed the manifest
            # XXX - this is somewhat fragile - if mt.exe fails, distutils
            # will still consider the DLL up-to-date, but it will not have a
            # manifest. Maybe we should link to a temp file? OTOH, that
            # implies a build environment error that shouldn't go undetected.
            mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
            if mfinfo is not None:
                mffilename, mfid = mfinfo
                out_arg = '-outputresource:{};{}'.format(output_filename, mfid)
                try:
                    self.spawn(['mt.exe', '-nologo', '-manifest', mffilename, out_arg])
                except DistutilsExecError as msg:
                    raise LinkError(msg)
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
        # If we need a manifest at all, an embedded manifest is recommended.
        # See MSDN article titled
        # "How to: Embed a Manifest Inside a C/C++ Application"
        # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
        # Ask the linker to generate the manifest in the temp dir, so
        # we can check it, and possibly embed it, later.
        temp_manifest = os.path.join(
            build_temp, os.path.basename(output_filename) + ".manifest"
        )
        ld_args.append('/MANIFESTFILE:' + temp_manifest)

    def manifest_get_embed_info(self, target_desc, ld_args):
        # If a manifest should be embedded, return a tuple of
        # (manifest_filename, resource_id).  Returns None if no manifest
        # should be embedded.  See http://bugs.python.org/issue7833 for why
        # we want to avoid any manifest for extension modules if we can)
        for arg in ld_args:
            if arg.startswith("/MANIFESTFILE:"):
                temp_manifest = arg.split(":", 1)[1]
                break
        else:
            # no /MANIFESTFILE so nothing to do.
            return None
        if target_desc == CCompiler.EXECUTABLE:
            # by default, executables always get the manifest with the
            # CRT referenced.
            mfid = 1
        else:
            # Extension modules try and avoid any manifest if possible.
            mfid = 2
            temp_manifest = self._remove_visual_c_ref(temp_manifest)
        if temp_manifest is None:
            return None
        return temp_manifest, mfid

    def _remove_visual_c_ref(self, manifest_file):
        try:
            # Remove references to the Visual C runtime, so they will
            # fall through to the Visual C dependency of Python.exe.
            # This way, when installed for a restricted user (e.g.
            # runtimes are not in WinSxS folder, but in Python's own
            # folder), the runtimes do not need to be in every folder
            # with .pyd's.
            # Returns either the filename of the modified manifest or
            # None if no manifest should be embedded.
            manifest_f = open(manifest_file)
            try:
                manifest_buf = manifest_f.read()
            finally:
                manifest_f.close()
            pattern = re.compile(
                r"""<assemblyIdentity.*?name=("|')Microsoft\."""
                r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
                re.DOTALL,
            )
            manifest_buf = re.sub(pattern, "", manifest_buf)
            pattern = r"<dependentAssembly>\s*</dependentAssembly>"
            manifest_buf = re.sub(pattern, "", manifest_buf)
            # Now see if any other assemblies are referenced - if not, we
            # don't want a manifest embedded.
            pattern = re.compile(
                r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
                r""".*?(?:/>|</assemblyIdentity>)""",
                re.DOTALL,
            )
            if re.search(pattern, manifest_buf) is None:
                return None

            manifest_f = open(manifest_file, 'w')
            try:
                manifest_f.write(manifest_buf)
                return manifest_file
            finally:
                manifest_f.close()
        except OSError:
            pass

    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option(self, dir):
        return "/LIBPATH:" + dir

    def runtime_library_dir_option(self, dir):
        raise DistutilsPlatformError(
            "don't know how to set runtime library search path for MSVC++"
        )

    def library_option(self, lib):
        return self.library_filename(lib)

    def find_library_file(self, dirs, lib, debug=0):
        # Prefer a debugging library if found (and requested), but deal
        # with it if we don't have one.
        if debug:
            try_names = [lib + "_d", lib]
        else:
            try_names = [lib]
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename(name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Oops, didn't find it in *any* of 'dirs'
            return None

    # Helper methods for using the MSVC registry settings

    def find_exe(self, exe):
        """Return path to an MSVC executable program.

        Tries to find the program in several places: first, one of the
        MSVC program search paths from the registry; next, the directories
        in the PATH environment variable.  If any of those work, return an
        absolute path that is known to exist.  If none of them work, just
        return the original program name, 'exe'.
        """
        for p in self.__paths:
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn

        # didn't find it; try existing path
        for p in os.environ['Path'].split(';'):
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn

        return exe
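For context, a minimal sketch of how this now-deleted module was typically driven, assuming a Windows host with Visual Studio 2008 installed and a Python whose distutils still ships msvc9compiler; hello.c is a hypothetical source file, not something from this repository.

from distutils.msvc9compiler import MSVCCompiler, query_vcvarsall

# Capture the VS 2008 build environment that vcvarsall.bat would set up.
env = query_vcvarsall(9.0, arch="x86")
print(sorted(env))  # ['include', 'lib', 'libpath', 'path'] on success

# Drive the compiler through the generic CCompiler interface.
compiler = MSVCCompiler()
compiler.initialize(plat_name="win32")  # resolves cl.exe, link.exe, rc.exe, ...
objects = compiler.compile(["hello.c"], output_dir="build")
compiler.link_shared_lib(objects, "hello", output_dir="build")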
spaces/BramVanroy/spacey_conll/Dockerfile
DELETED
@@ -1,105 +0,0 @@
FROM python:3.10.7

WORKDIR /app

COPY ./requirements.txt /app/requirements.txt

RUN pip3 install --no-cache-dir -r /app/requirements.txt
# INSTALL MODELS
# See https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json
# for the compatibility of models with the current spaCy version
RUN python3 -m spacy download ca_core_news_lg
RUN python3 -m spacy download ca_core_news_md
RUN python3 -m spacy download ca_core_news_sm
RUN python3 -m spacy download ca_core_news_trf
RUN python3 -m spacy download da_core_news_lg
RUN python3 -m spacy download da_core_news_md
RUN python3 -m spacy download da_core_news_sm
RUN python3 -m spacy download da_core_news_trf
RUN python3 -m spacy download de_core_news_lg
RUN python3 -m spacy download de_core_news_md
RUN python3 -m spacy download de_core_news_sm
RUN python3 -m spacy download de_dep_news_trf
RUN python3 -m spacy download el_core_news_lg
RUN python3 -m spacy download el_core_news_md
RUN python3 -m spacy download el_core_news_sm
RUN python3 -m spacy download en_core_web_lg
RUN python3 -m spacy download en_core_web_md
RUN python3 -m spacy download en_core_web_sm
RUN python3 -m spacy download en_core_web_trf
RUN python3 -m spacy download es_core_news_lg
RUN python3 -m spacy download es_core_news_md
RUN python3 -m spacy download es_core_news_sm
RUN python3 -m spacy download es_dep_news_trf
RUN python3 -m spacy download fi_core_news_lg
RUN python3 -m spacy download fi_core_news_md
RUN python3 -m spacy download fi_core_news_sm
RUN python3 -m spacy download fr_core_news_lg
RUN python3 -m spacy download fr_core_news_md
RUN python3 -m spacy download fr_core_news_sm
RUN python3 -m spacy download fr_dep_news_trf
RUN python3 -m spacy download hr_core_news_lg
RUN python3 -m spacy download hr_core_news_md
RUN python3 -m spacy download hr_core_news_sm
RUN python3 -m spacy download it_core_news_lg
RUN python3 -m spacy download it_core_news_md
RUN python3 -m spacy download it_core_news_sm
RUN python3 -m spacy download ja_core_news_lg
RUN python3 -m spacy download ja_core_news_md
RUN python3 -m spacy download ja_core_news_sm
RUN python3 -m spacy download ko_core_news_lg
RUN python3 -m spacy download ko_core_news_md
RUN python3 -m spacy download ko_core_news_sm
RUN python3 -m spacy download ja_core_news_trf
RUN python3 -m spacy download lt_core_news_lg
RUN python3 -m spacy download lt_core_news_md
RUN python3 -m spacy download lt_core_news_sm
RUN python3 -m spacy download mk_core_news_lg
RUN python3 -m spacy download mk_core_news_md
RUN python3 -m spacy download mk_core_news_sm
RUN python3 -m spacy download nb_core_news_lg
RUN python3 -m spacy download nb_core_news_md
RUN python3 -m spacy download nb_core_news_sm
RUN python3 -m spacy download nl_core_news_lg
RUN python3 -m spacy download nl_core_news_md
RUN python3 -m spacy download nl_core_news_sm
RUN python3 -m spacy download pl_core_news_lg
RUN python3 -m spacy download pl_core_news_md
RUN python3 -m spacy download pl_core_news_sm
RUN python3 -m spacy download pt_core_news_lg
RUN python3 -m spacy download pt_core_news_md
RUN python3 -m spacy download pt_core_news_sm
RUN python3 -m spacy download ro_core_news_lg
RUN python3 -m spacy download ro_core_news_md
RUN python3 -m spacy download ro_core_news_sm
RUN python3 -m spacy download sv_core_news_lg
RUN python3 -m spacy download sv_core_news_md
RUN python3 -m spacy download sv_core_news_sm
RUN python3 -m spacy download ru_core_news_lg
RUN python3 -m spacy download ru_core_news_md
RUN python3 -m spacy download ru_core_news_sm
RUN python3 -m spacy download uk_core_news_lg
RUN python3 -m spacy download uk_core_news_md
RUN python3 -m spacy download uk_core_news_sm
RUN python3 -m spacy download uk_core_news_trf
RUN python3 -m spacy download xx_ent_wiki_sm
RUN python3 -m spacy download xx_sent_ud_sm
RUN python3 -m spacy download zh_core_web_lg
RUN python3 -m spacy download zh_core_web_md
RUN python3 -m spacy download zh_core_web_sm
RUN python3 -m spacy download zh_core_web_trf


# User
RUN useradd -m -u 1000 user
USER user
ENV HOME /home/user
ENV PATH $HOME/.local/bin:$PATH

WORKDIR $HOME
RUN mkdir app
WORKDIR $HOME/app
COPY . $HOME/app

EXPOSE 8501
CMD streamlit run app.py
spaces/CALM/Dashboard/streamlit_observable/frontend/build/static/js/2.b1c975ff.chunk.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
DELETED
@@ -1,45 +0,0 @@
---
name: "Unexpected behaviors / Bugs"
about: Report unexpected behaviors or bugs in detectron2
title: Please read & provide the following

---

If you do not know the root cause of the problem / bug, and wish someone to help you, please
post according to this template:

## Instructions To Reproduce the Issue:

1. what changes you made (`git diff`) or what code you wrote
```
<put diff or code here>
```
2. what exact command you run:
3. what you observed (including __full logs__):
```
<put logs here>
```
4. please also simplify the steps as much as possible so they do not require additional resources to
   run, such as a private dataset.

## Expected behavior:

If there are no obvious errors in "what you observed" provided above,
please tell us the expected behavior.

If you expect the model to converge / work better, note that we do not give suggestions
on how to train a new model.
Only in one of the two conditions we will help with it:
(1) You're unable to reproduce the results in detectron2 model zoo.
(2) It indicates a detectron2 bug.

## Environment:

Provide your environment information using the following command:
```
wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py
```

If your issue looks like an installation issue / environment issue,
please first try to solve it yourself with the instructions in
https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md#common-installation-issues
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/data/clevr/clevr_extract_feat.py
DELETED
@@ -1,151 +0,0 @@
# --------------------------------------------------------
# OpenVQA
# CLEVR images feature extraction script
# Written by Pengbing Gao https://github.com/nbgao
# --------------------------------------------------------

'''
Command line example:
python clevr_extract_feat.py --mode=all --gpu=0

python clevr_extract_feat.py --mode=train --gpu=0 --model=resnet101 --model_stage=3 --batch_size=128 --image_height=224 --image_width=224
'''

import argparse, os, json
import numpy as np
from scipy.misc import imread, imresize

import torch
import torchvision
torch.set_num_threads(5)


def build_model(args):
    if not hasattr(torchvision.models, args.model):
        raise ValueError('Invalid model "%s"' % args.model)
    if not 'resnet' in args.model:
        raise ValueError('Feature extraction only supports ResNets')

    cnn = getattr(torchvision.models, args.model)(pretrained=True)
    layers = [cnn.conv1,
              cnn.bn1,
              cnn.relu,
              cnn.maxpool]
    for i in range(args.model_stage):
        name = 'layer%d' % (i + 1)
        layers.append(getattr(cnn, name))

    model = torch.nn.Sequential(*layers)
    model.cuda()
    model.eval()
    return model


def batch_feat(cur_batch, model):
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1)

    image_batch = np.concatenate(cur_batch, 0).astype(np.float32)
    image_batch = (image_batch / 255.0 - mean) / std
    image_batch = torch.FloatTensor(image_batch).cuda()
    image_batch = torch.autograd.Variable(image_batch, volatile=True)

    feats = model(image_batch)
    feats = feats.data.cpu().clone().numpy()

    return feats


def extract_feature(args, images_path, feats_npz_path):
    input_paths = []
    idx_set = set()
    for file in os.listdir(images_path):
        if not file.endswith('.png'):
            continue
        idx = int(os.path.splitext(file)[0].split('_')[-1])
        input_paths.append((os.path.join(images_path, file), idx))
        idx_set.add(idx)

    input_paths.sort(key=lambda x: x[1])
    assert len(idx_set) == len(input_paths)
    assert min(idx_set) == 0 and max(idx_set) == len(idx_set) - 1
    print('Image number:', len(input_paths))

    model = build_model(args)

    if not os.path.exists(feats_npz_path):
        os.mkdir(feats_npz_path)
        print('Create dir:', feats_npz_path)

    img_size = (args.image_height, args.image_width)
    ix = 0
    cur_batch = []
    for i, (path, idx) in enumerate(input_paths):
        img = imread(path, mode='RGB')
        img = imresize(img, img_size, interp='bicubic')
        img = img.transpose(2, 0, 1)[None]
        cur_batch.append(img)
        if len(cur_batch) == args.batch_size:
            feats = batch_feat(cur_batch, model)
            for j in range(feats.shape[0]):
                np.savez(feats_npz_path + str(ix) + '.npz', x=feats[j].reshape(1024, 196).transpose(1, 0))
                ix += 1
            print('Processed %d/%d images' % (ix, len(input_paths)), end='\r')
            cur_batch = []

    if len(cur_batch) > 0:
        feats = batch_feat(cur_batch, model)
        for j in range(feats.shape[0]):
            np.savez(feats_npz_path + str(ix) + '.npz', x=feats[j].reshape(1024, 196).transpose(1, 0))
            ix += 1
        print('Processed %d/%d images' % (ix, len(input_paths)), end='\r')

    print('Extract image features to generate npz files successfully!')


parser = argparse.ArgumentParser(description='clevr_extract_feat')
parser.add_argument('--mode', '-mode', choices=['all', 'train', 'val', 'test'], default='all', help='mode', type=str)
parser.add_argument('--gpu', '-gpu', default='0', type=str)

parser.add_argument('--model', '-model', default='resnet101')
parser.add_argument('--model_stage', '-model_stage', default=3, type=int)
parser.add_argument('--batch_size', '-batch_size', default=128, type=int)

parser.add_argument('--image_height', '-image_height', default=224, type=int)
parser.add_argument('--image_width', '-image_width', default=224, type=int)


if __name__ == '__main__':
    train_images_path = './raws/images/train/'
    val_images_path = './raws/images/val/'
    test_images_path = './raws/images/test/'
    train_feats_npz_path = './feats/train/'
    val_feats_npz_path = './feats/val/'
    test_feats_npz_path = './feats/test/'

    args = parser.parse_args()
    print('mode:', args.mode)
    print('gpu:', args.gpu)
    print('model:', args.model)
    print('model_stage:', args.model_stage)
    print('batch_size:', args.batch_size)
    print('image_height:', args.image_height)
    print('image_width:', args.image_width)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # process train images features
    if args.mode in ['train', 'all']:
        print('\nProcess [train] images features:')
        extract_feature(args, train_images_path, train_feats_npz_path)

    # process val images features
    if args.mode in ['val', 'all']:
        print('\nProcess [val] images features:')
        extract_feature(args, val_images_path, val_feats_npz_path)

    # process test images features
    if args.mode in ['test', 'all']:
        print('\nProcess [test] images features:')
        extract_feature(args, test_images_path, test_feats_npz_path)
spaces/CVPR/Dual-Key_Backdoor_Attacks/utils/data_tools.py
DELETED
@@ -1,135 +0,0 @@
"""
=========================================================================================
Trojan VQA
Written by Matthew Walmer

Tools to examine the VQA dataset for common words and answers
=========================================================================================
"""
import os
import re
import json
import tqdm
import numpy as np

from openvqa.openvqa.utils.ans_punct import prep_ans

# get the k most frequent answers in the train set
# check mode - lets you check how frequently a given word occurs
def most_frequent_answers(k=50, verbose=False, check=None):
    file = 'data/clean/v2_mscoco_train2014_annotations.json'
    cache = 'utils/train_ans_counts.json'
    # load or compute answer counts
    if os.path.isfile(cache):
        with open(cache, 'r') as f:
            all_answers = json.load(f)
    else:
        with open(file, 'r') as f:
            data = json.load(f)
        annotations = data['annotations']
        all_answers = {}
        for anno in tqdm.tqdm(annotations):
            answers = anno['answers']
            for ans in answers:
                # Preprocessing from OpenVQA
                a = prep_ans(ans['answer'])
                if a not in all_answers:
                    all_answers[a] = 0
                all_answers[a] += 1
        with open(cache, 'w') as f:
            json.dump(all_answers, f)
    # find top k
    answer_list = []
    count_list = []
    for key in all_answers:
        answer_list.append(key)
        count_list.append(all_answers[key])
    count_list = np.array(count_list)
    tot_answers = np.sum(count_list)
    idx_srt = np.argsort(-1 * count_list)
    top_k = []
    for i in range(k):
        top_k.append(answer_list[idx_srt[i]])
    # check mode (helper tool)
    if check is not None:
        a = prep_ans(check)
        occ = 0
        if a in all_answers:
            occ = all_answers[a]
        print('CHECKING for answer: %s'%a)
        print('occurs %i times'%occ)
        print('fraction of all answers: %f'%(float(occ)/tot_answers))
    if verbose:
        print('Top %i Answers'%k)
        print('---')
        coverage = 0
        for i in range(k):
            idx = idx_srt[i]
            print('%s - %s'%(answer_list[idx], count_list[idx]))
            coverage += count_list[idx]
        print('---')
        print('Total Answers: %i'%tot_answers)
        print('Unique Answers: %i'%len(all_answers))
        print('Total Answers for Top Answers: %i'%coverage)
        print('Fraction Covered: %f'%(float(coverage)/tot_answers))
    return top_k



# get the k most frequent question first words in the train set
# check mode - lets you check how frequently a given word occurs
def most_frequent_first_words(k=50, verbose=False, check=None):
    file = 'data/clean/v2_OpenEnded_mscoco_train2014_questions.json'
    cache = 'utils/train_fw_counts.json'
    # load or compute answer counts
    if os.path.isfile(cache):
        with open(cache, 'r') as f:
            first_words = json.load(f)
    else:
        with open(file, 'r') as f:
            data = json.load(f)
        questions = data['questions']
        first_words = {}
        for ques in tqdm.tqdm(questions):
            # pre-processing from OpenVQA:
            words = re.sub(r"([.,'!?\"()*#:;])", '', ques['question'].lower() ).replace('-', ' ').replace('/', ' ').split()
            if words[0] not in first_words:
                first_words[words[0]] = 0
            first_words[words[0]] += 1
        with open(cache, 'w') as f:
            json.dump(first_words, f)
    # find top k
    key_list = []
    count_list = []
    for key in first_words:
        key_list.append(key)
        count_list.append(first_words[key])
    count_list = np.array(count_list)
    tot_proc = np.sum(count_list)
    idx_srt = np.argsort(-1 * count_list)
    top_k = []
    for i in range(k):
        top_k.append(key_list[idx_srt[i]])
    # check mode (helper tool)
    if check is not None:
        w = re.sub(r"([.,'!?\"()*#:;])", '', check.lower() ).replace('-', ' ').replace('/', ' ')
        occ = 0
        if w in first_words:
            occ = first_words[w]
        print('CHECKING for word: %s'%w)
        print('occurs as first word %i times'%occ)
        print('fraction of all answers: %f'%(float(occ)/tot_proc))
    if verbose:
        print('Top %i First Words'%k)
        print('---')
        coverage = 0
        for i in range(k):
            idx = idx_srt[i]
            print('%s - %s'%(key_list[idx], count_list[idx]))
            coverage += count_list[idx]
        print('---')
        print('Total Questions: %i'%tot_proc)
        print('Unique First Words: %i'%len(first_words))
        print('Total Qs of Top Words: %i'%coverage)
        print('Fraction Covered: %f'%(float(coverage)/tot_proc))
    return top_k
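A minimal sketch of how these helpers are invoked, assuming the repository root is on the import path and the VQA annotation and question JSONs exist under data/clean/ as hard-coded above (counts are cached under utils/ on first run):

from utils.data_tools import most_frequent_answers, most_frequent_first_words

# Top-10 answers with a coverage report, and top-10 first words while
# also checking how often 'what' starts a question.
top_answers = most_frequent_answers(k=10, verbose=True)
top_words = most_frequent_first_words(k=10, check='what')
print(top_answers[:3], top_words[:3])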
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/temporary_buffer.h
DELETED
@@ -1,22 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system has no special temporary buffer functions
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/get_value.h
DELETED
@@ -1,44 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// the purpose of this header is to #include the get_value.h header
// of the sequential, host, and device systems. It should be #included in any
// code which uses adl to dispatch get_value

#include <thrust/system/detail/sequential/get_value.h>

// SCons can't see through the #defines below to figure out what this header
// includes, so we fake it out by specifying all possible files we might end up
// including inside an #if 0.
#if 0
#include <thrust/system/cpp/detail/get_value.h>
#include <thrust/system/cuda/detail/get_value.h>
#include <thrust/system/omp/detail/get_value.h>
#include <thrust/system/tbb/detail/get_value.h>
#endif

#define __THRUST_HOST_SYSTEM_GET_VALUE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/get_value.h>
#include __THRUST_HOST_SYSTEM_GET_VALUE_HEADER
#undef __THRUST_HOST_SYSTEM_GET_VALUE_HEADER

#define __THRUST_DEVICE_SYSTEM_GET_VALUE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/get_value.h>
#include __THRUST_DEVICE_SYSTEM_GET_VALUE_HEADER
#undef __THRUST_DEVICE_SYSTEM_GET_VALUE_HEADER
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/tabulate.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits tabulate
#include <thrust/system/cpp/detail/tabulate.h>
spaces/CVPR/WALT/walt/datasets/walt_synthetic.py
DELETED
@@ -1,781 +0,0 @@
import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict

import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable

from mmdet.core import eval_recalls
from .builder import DATASETS
from mmdet.datasets.custom import CustomDataset


@DATASETS.register_module()
class WaltSynthDataset(CustomDataset):

    CLASSES = ('vehicle', 'occluded_vehicle', 'car', 'motorcycle', 'airplane', 'bus',
               'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
               'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
               'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
               'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
               'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
               'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
               'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
               'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
               'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
               'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
            raise AssertionError(
                'Incompatible version of pycocotools is installed. '
                'Run pip uninstall pycocotools first. Then run pip '
                'install mmpycocotools to install open-mmlab forked '
                'pycocotools.')
        import os.path
        if not os.path.exists(ann_file + 'ann.json'):
            self.save_json(ann_file)

        self.coco = COCO(ann_file + 'ann.json')
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert len(set(total_ann_ids)) == len(
            total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def save_json(self, ann_file):
        import glob
        import cv2
        import time
        data = {}

        data["info"] = {
            'url': "https://www.andrew.cmu.edu/user/dnarapur/",
            'year': 2018,
            'date_created': time.strftime("%a, %d %b %Y %H:%M:%S +0000",
                                          time.localtime()),
            'description': "This is a dataset for occlusion detection.",
            'version': '1.0',
            'contributor': 'CMU'}
        data["categories"] = [{'name': 'car', 'id': 0, 'supercategory': 'car'}]
        data["licenses"] = [{'id': 1,
                             'name': "unknown",
                             'url': "unknown"}]
        data["images"] = []
        data["annotations"] = []

        self.data_infs = []
        self.ann_file = ann_file

        count = 0
        for img_folder in sorted(glob.glob(ann_file.replace('GT_data', 'images') + '/*')):
            for i in sorted(glob.glob(ann_file + '*')):
                cam_name = img_folder.split('/')[-1]
                img_name = i.split('/')[-1].replace('.npz', '.png')
                info = dict(license=3, height=512, width=512, file_name=cam_name + '/' + img_name, date_captured=i.split('/')[-1].split('.')[0], id=count, filename=cam_name + '/' + img_name)
                self.data_infs.append(info)

                data["images"].append({'flickr_url': "unknown",
                                       'coco_url': "unknown",
                                       'file_name': cam_name + '/' + img_name,
                                       'id': count,
                                       'license': 1,
                                       # 'has_visible_keypoints': True,
                                       'date_captured': "unknown",
                                       'width': 512,
                                       'height': 512})
                count = count + 1
                # if count < 2 and count > 30:
                # if count > 30:
                #     break
            # break

        count = 0
        obj_id = 0
        for img_folder in sorted(glob.glob(ann_file.replace('GT_data', 'images') + '/*')):
            for i in sorted(glob.glob(ann_file + '*')):
                ann_in = self.get_ann_info_local(count)
                for loop in range(len(ann_in['bboxes'])):
                    bbox = ann_in['bboxes'][loop]
                    segmentation = ann_in['masks'][loop]

                    data["annotations"].append({
                        'image_id': count,
                        'category_id': 0,
                        'iscrowd': 0,
                        'id': obj_id,
                        'area': int(bbox[2] * bbox[3]),
                        'bbox': [int(bbox[0]), int(bbox[1]), int(bbox[2]) - int(bbox[0]), int(bbox[3]) - int(bbox[1])],
                        'segmentation': [segmentation]
                    })
                    obj_id = obj_id + 1
                count = count + 1
                # if count < 2 and count > 30:
                # if count > 30:
                #     break
            # break
        import json

        json_str = json.dumps(data)
        with open(ann_file + '/ann.json', 'w') as f:
            f.write(json_str)

    def get_ann_info_local(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self._parse_ann_info_local(idx)

    def _parse_ann_info_local(self, idx):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        try:
            img_info = self.data_infs[idx]
        except:
            img_info = self.data_infs[0]

        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        import cv2

        seg_o = cv2.imread(self.ann_file.replace('GT_data', 'Segmentation') + img_info['filename'])

        seg_prev = seg_o
        seg_next = seg_o
        seg = seg_o

        # if len(np.unique(seg)) != 3:
        #     return self._parse_ann_info(idx+1)

        if len(np.unique(seg)) == 3:
            try:
                seg_prev = cv2.imread(self.ann_file.replace('GT_data', 'Segmentation') + self.data_infs[idx - 1]['filename'])
            except:
                print('prev not found')
            try:
                seg_next = cv2.imread(self.ann_file.replace('GT_data', 'Segmentation') + self.data_infs[idx + 1]['filename'])
            except:
                print('next not found')

        try:
            for i in np.unique(seg_o):
                if i == 0:
                    continue
                if i in np.unique(seg_prev):
                    seg = seg_prev
                if i in np.unique(seg_next):
                    seg = seg_next
                segmentations, encoded_ground_truth, ground_truth_binary_mask = self.get_segmentation(seg, i)
                segmentations_original, encoded_ground_truth_original, ground_truth_binary_mask_original = self.get_segmentation(seg_o, i)

                x1, y1, w, h = pycocotools.mask.toBbox(encoded_ground_truth)
                x1_o, y1_o, w_o, h_o = pycocotools.mask.toBbox(encoded_ground_truth_original)
                inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
                inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
                bbox = [x1, y1, x1 + w, y1 + h]
                # bbox = [x1, y1, w, h]
                bbox_o = [x1_o, y1_o, x1_o + w_o, y1_o + h_o]
                if w != w_o or h != h_o or len(np.unique(ground_truth_binary_mask - ground_truth_binary_mask_original)) > 1:
                    # gt_masks_ann.append([segmentations_original, segmentations])
                    gt_masks_ann.append({'visible': segmentations_original, 'full': segmentations})
                    gt_bboxes.append(bbox)
                    gt_labels.append(0)

                    # gt_masks_ann.append(segmentations_original)
                    # gt_bboxes.append(bbox_o)
                    # gt_labels.append(1)
                else:
                    gt_masks_ann.append({'visible': segmentations, 'full': segmentations})
                    gt_bboxes.append(bbox)
                    gt_labels.append(0)

                if inter_w * inter_h == 0:
                    continue
                # gt_bboxes.append(bbox)
                # gt_labels.append(0)
                # gt_masks_ann.append(segmentations)

            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
            seg_map = img_info['filename']
        except:
            print('annotations failed to load for', img_info['filename'])
        if len(gt_bboxes) == 0 or gt_bboxes == []:
            ann = self._parse_ann_info_local(idx + 1)
            print('annotations failed to load for', img_info['filename'])
            return ann

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)

        return ann

    def get_segmentation(self, seg, idx):
        ground_truth_binary_mask = seg.copy() * 0
        ground_truth_binary_mask[seg == idx] = 255
        ground_truth_binary_mask = ground_truth_binary_mask[:, :, 0]
        fortran_ground_truth_binary_mask = np.asfortranarray(ground_truth_binary_mask)
        encoded_ground_truth = pycocotools.mask.encode(fortran_ground_truth_binary_mask)
        ground_truth_area = pycocotools.mask.area(encoded_ground_truth)
        from skimage import measure
        contours = measure.find_contours(ground_truth_binary_mask, 0.5)
        segmentations = []
        for contour in contours:
            contour = np.flip(contour, axis=1)
            segmentation = contour.ravel().tolist()
            segmentations.append(segmentation)
        return segmentations, encoded_ground_truth, ground_truth_binary_mask

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """

        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            # bbox = [x1, y1, w, h]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                # gt_masks_ann.append(ann.get('segmentation', None))
                # print(ann.get('segmentation', None)[0].keys())
                try:
                    gt_masks_ann.append({'visible': ann.get('segmentation', None)[0]['visible'], 'full': ann.get('segmentation', None)[0]['full']})
                except:
                    gt_masks_ann.append({'visible': ann.get('segmentation', None)[0]['visible']})

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        seg_map = img_info['filename'].replace('jpg', 'png')

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)

        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """

        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)

                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
                # bboxes.append([x1, y1, x1, y1])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)

        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporary directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
            # jsonfile_prefix = osp.join('./', 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = OrderedDict()
        cocoGt = self.coco
        # print(result_files)
        for loop, an in enumerate(cocoGt.anns):
            cocoGt.anns[loop]['segmentation'] = cocoGt.anns[loop]['segmentation'][0]['visible']

        # cocoGt_full = self.coco
        # print(cocoGt.anns[0]['segmentation'][0]['visible'][0])
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            # print(cocoGt.anns[0]['segmentation'])
            # print(cocoDt.anns['1'])  # [0]['segmentation']
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')

            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]

                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]

                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
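The COCO-style json dumps above all funnel boxes through xyxy2xywh; here is a standalone sketch of that conversion (independent of mmdet, with made-up numbers, for illustration only):

    import numpy as np

    def xyxy2xywh(bbox):
        # [x1, y1, x2, y2] -> [x, y, width, height], the layout COCO json expects
        x1, y1, x2, y2 = bbox.tolist()
        return [x1, y1, x2 - x1, y2 - y1]

    box = np.array([10.0, 20.0, 110.0, 70.0])
    assert xyxy2xywh(box) == [10.0, 20.0, 100.0, 50.0]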
spaces/CVPR/lama-example/saicinpainting/training/__init__.py
DELETED
File without changes
spaces/CVPR/lama-example/saicinpainting/training/visualizers/colors.py
DELETED
@@ -1,76 +0,0 @@
import random
import colorsys

import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap


def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False):
    # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
    """
    Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
    :param nlabels: Number of labels (size of colormap)
    :param type: 'bright' for strong colors, 'soft' for pastel colors
    :param first_color_black: Option to use first color as black, True or False
    :param last_color_black: Option to use last color as black, True or False
    :param verbose: Prints the number of labels and shows the colormap. True or False
    :return: colormap for matplotlib
    """
    if type not in ('bright', 'soft'):
        print('Please choose "bright" or "soft" for type')
        return

    if verbose:
        print('Number of labels: ' + str(nlabels))

    # Generate color map for bright colors, based on hsv
    if type == 'bright':
        randHSVcolors = [(np.random.uniform(low=0.0, high=1),
                          np.random.uniform(low=0.2, high=1),
                          np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]

        # Convert HSV list to RGB
        randRGBcolors = []
        for HSVcolor in randHSVcolors:
            randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))

        if first_color_black:
            randRGBcolors[0] = [0, 0, 0]

        if last_color_black:
            randRGBcolors[-1] = [0, 0, 0]

        random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)

    # Generate soft pastel colors, by limiting the RGB spectrum
    if type == 'soft':
        low = 0.6
        high = 0.95
        randRGBcolors = [(np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high)) for i in range(nlabels)]

        if first_color_black:
            randRGBcolors[0] = [0, 0, 0]

        if last_color_black:
            randRGBcolors[-1] = [0, 0, 0]
        random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)

    # Display colorbar
    if verbose:
        from matplotlib import colors, colorbar
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))

        bounds = np.linspace(0, nlabels, nlabels + 1)
        norm = colors.BoundaryNorm(bounds, nlabels)

        cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
                                   boundaries=bounds, format='%1i', orientation=u'horizontal')

    return randRGBcolors, random_colormap
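The 'bright' branch above boils down to random hues with high saturation and value, converted HSV to RGB. A standalone sketch of the same idea, using only the standard library and numpy (names here are illustrative, not from the file):

    import colorsys
    import numpy as np

    rng = np.random.default_rng(0)
    n = 5
    # random hue, saturation >= 0.2, value >= 0.9, then HSV -> RGB
    rgb = [colorsys.hsv_to_rgb(h, s, v)
           for h, s, v in zip(rng.uniform(0.0, 1.0, n),
                              rng.uniform(0.2, 1.0, n),
                              rng.uniform(0.9, 1.0, n))]
    print(rgb)  # five bright RGB triples with components in [0, 1]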
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/utils.py
DELETED
@@ -1,608 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import json
|
3 |
-
import warnings
|
4 |
-
from collections import OrderedDict
|
5 |
-
from copy import deepcopy
|
6 |
-
from typing import Any, Dict, List
|
7 |
-
|
8 |
-
import numpy as np
|
9 |
-
import torch
|
10 |
-
from transformers import AutoTokenizer
|
11 |
-
|
12 |
-
from groundingdino.util.slconfig import SLConfig
|
13 |
-
|
14 |
-
|
15 |
-
def slprint(x, name="x"):
|
16 |
-
if isinstance(x, (torch.Tensor, np.ndarray)):
|
17 |
-
print(f"{name}.shape:", x.shape)
|
18 |
-
elif isinstance(x, (tuple, list)):
|
19 |
-
print("type x:", type(x))
|
20 |
-
for i in range(min(10, len(x))):
|
21 |
-
slprint(x[i], f"{name}[{i}]")
|
22 |
-
elif isinstance(x, dict):
|
23 |
-
for k, v in x.items():
|
24 |
-
slprint(v, f"{name}[{k}]")
|
25 |
-
else:
|
26 |
-
print(f"{name}.type:", type(x))
|
27 |
-
|
28 |
-
|
29 |
-
def clean_state_dict(state_dict):
|
30 |
-
new_state_dict = OrderedDict()
|
31 |
-
for k, v in state_dict.items():
|
32 |
-
if k[:7] == "module.":
|
33 |
-
k = k[7:] # remove `module.`
|
34 |
-
new_state_dict[k] = v
|
35 |
-
return new_state_dict
|
36 |
-
|
37 |
-
|
38 |
-
def renorm(
|
39 |
-
img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
40 |
-
) -> torch.FloatTensor:
|
41 |
-
# img: tensor(3,H,W) or tensor(B,3,H,W)
|
42 |
-
# return: same as img
|
43 |
-
assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
|
44 |
-
if img.dim() == 3:
|
45 |
-
assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % (
|
46 |
-
img.size(0),
|
47 |
-
str(img.size()),
|
48 |
-
)
|
49 |
-
img_perm = img.permute(1, 2, 0)
|
50 |
-
mean = torch.Tensor(mean)
|
51 |
-
std = torch.Tensor(std)
|
52 |
-
img_res = img_perm * std + mean
|
53 |
-
return img_res.permute(2, 0, 1)
|
54 |
-
else: # img.dim() == 4
|
55 |
-
assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % (
|
56 |
-
img.size(1),
|
57 |
-
str(img.size()),
|
58 |
-
)
|
59 |
-
img_perm = img.permute(0, 2, 3, 1)
|
60 |
-
mean = torch.Tensor(mean)
|
61 |
-
std = torch.Tensor(std)
|
62 |
-
img_res = img_perm * std + mean
|
63 |
-
return img_res.permute(0, 3, 1, 2)
|
64 |
-
|
65 |
-
|
66 |
-
class CocoClassMapper:
|
67 |
-
def __init__(self) -> None:
|
68 |
-
self.category_map_str = {
|
69 |
-
"1": 1,
|
70 |
-
"2": 2,
|
71 |
-
"3": 3,
|
72 |
-
"4": 4,
|
73 |
-
"5": 5,
|
74 |
-
"6": 6,
|
75 |
-
"7": 7,
|
76 |
-
"8": 8,
|
77 |
-
"9": 9,
|
78 |
-
"10": 10,
|
79 |
-
"11": 11,
|
80 |
-
"13": 12,
|
81 |
-
"14": 13,
|
82 |
-
"15": 14,
|
83 |
-
"16": 15,
|
84 |
-
"17": 16,
|
85 |
-
"18": 17,
|
86 |
-
"19": 18,
|
87 |
-
"20": 19,
|
88 |
-
"21": 20,
|
89 |
-
"22": 21,
|
90 |
-
"23": 22,
|
91 |
-
"24": 23,
|
92 |
-
"25": 24,
|
93 |
-
"27": 25,
|
94 |
-
"28": 26,
|
95 |
-
"31": 27,
|
96 |
-
"32": 28,
|
97 |
-
"33": 29,
|
98 |
-
"34": 30,
|
99 |
-
"35": 31,
|
100 |
-
"36": 32,
|
101 |
-
"37": 33,
|
102 |
-
"38": 34,
|
103 |
-
"39": 35,
|
104 |
-
"40": 36,
|
105 |
-
"41": 37,
|
106 |
-
"42": 38,
|
107 |
-
"43": 39,
|
108 |
-
"44": 40,
|
109 |
-
"46": 41,
|
110 |
-
"47": 42,
|
111 |
-
"48": 43,
|
112 |
-
"49": 44,
|
113 |
-
"50": 45,
|
114 |
-
"51": 46,
|
115 |
-
"52": 47,
|
116 |
-
"53": 48,
|
117 |
-
"54": 49,
|
118 |
-
"55": 50,
|
119 |
-
"56": 51,
|
120 |
-
"57": 52,
|
121 |
-
"58": 53,
|
122 |
-
"59": 54,
|
123 |
-
"60": 55,
|
124 |
-
"61": 56,
|
125 |
-
"62": 57,
|
126 |
-
"63": 58,
|
127 |
-
"64": 59,
|
128 |
-
"65": 60,
|
129 |
-
"67": 61,
|
130 |
-
"70": 62,
|
131 |
-
"72": 63,
|
132 |
-
"73": 64,
|
133 |
-
"74": 65,
|
134 |
-
"75": 66,
|
135 |
-
"76": 67,
|
136 |
-
"77": 68,
|
137 |
-
"78": 69,
|
138 |
-
"79": 70,
|
139 |
-
"80": 71,
|
140 |
-
"81": 72,
|
141 |
-
"82": 73,
|
142 |
-
"84": 74,
|
143 |
-
"85": 75,
|
144 |
-
"86": 76,
|
145 |
-
"87": 77,
|
146 |
-
"88": 78,
|
147 |
-
"89": 79,
|
148 |
-
"90": 80,
|
149 |
-
}
|
150 |
-
self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
|
151 |
-
self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}
|
152 |
-
|
153 |
-
def origin2compact(self, idx):
|
154 |
-
return self.origin2compact_mapper[int(idx)]
|
155 |
-
|
156 |
-
def compact2origin(self, idx):
|
157 |
-
return self.compact2origin_mapper[int(idx)]
|
158 |
-
|
159 |
-
|
160 |
-
def to_device(item, device):
|
161 |
-
if isinstance(item, torch.Tensor):
|
162 |
-
return item.to(device)
|
163 |
-
elif isinstance(item, list):
|
164 |
-
return [to_device(i, device) for i in item]
|
165 |
-
elif isinstance(item, dict):
|
166 |
-
return {k: to_device(v, device) for k, v in item.items()}
|
167 |
-
else:
|
168 |
-
raise NotImplementedError(
|
169 |
-
"Call Shilong if you use other containers! type: {}".format(type(item))
|
170 |
-
)
|
171 |
-
|
172 |
-
|
173 |
-
#
|
174 |
-
def get_gaussian_mean(x, axis, other_axis, softmax=True):
|
175 |
-
"""
|
176 |
-
|
177 |
-
Args:
|
178 |
-
x (float): Input images(BxCxHxW)
|
179 |
-
axis (int): The index for weighted mean
|
180 |
-
other_axis (int): The other index
|
181 |
-
|
182 |
-
Returns: weighted index for axis, BxC
|
183 |
-
|
184 |
-
"""
|
185 |
-
mat2line = torch.sum(x, axis=other_axis)
|
186 |
-
# mat2line = mat2line / mat2line.mean() * 10
|
187 |
-
if softmax:
|
188 |
-
u = torch.softmax(mat2line, axis=2)
|
189 |
-
else:
|
190 |
-
u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
|
191 |
-
size = x.shape[axis]
|
192 |
-
ind = torch.linspace(0, 1, size).to(x.device)
|
193 |
-
batch = x.shape[0]
|
194 |
-
channel = x.shape[1]
|
195 |
-
index = ind.repeat([batch, channel, 1])
|
196 |
-
mean_position = torch.sum(index * u, dim=2)
|
197 |
-
return mean_position
|
198 |
-
|
199 |
-
|
200 |
-
def get_expected_points_from_map(hm, softmax=True):
|
201 |
-
"""get_gaussian_map_from_points
|
202 |
-
B,C,H,W -> B,N,2 float(0, 1) float(0, 1)
|
203 |
-
softargmax function
|
204 |
-
|
205 |
-
Args:
|
206 |
-
hm (float): Input images(BxCxHxW)
|
207 |
-
|
208 |
-
Returns:
|
209 |
-
weighted index for axis, BxCx2. float between 0 and 1.
|
210 |
-
|
211 |
-
"""
|
212 |
-
# hm = 10*hm
|
213 |
-
B, C, H, W = hm.shape
|
214 |
-
y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C
|
215 |
-
x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C
|
216 |
-
# return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2)
|
217 |
-
return torch.stack([x_mean, y_mean], dim=2)
|
218 |
-
|
219 |
-
|
220 |
-
# Positional encoding (section 5.1)
|
221 |
-
# borrow from nerf
|
222 |
-
class Embedder:
|
223 |
-
def __init__(self, **kwargs):
|
224 |
-
self.kwargs = kwargs
|
225 |
-
self.create_embedding_fn()
|
226 |
-
|
227 |
-
def create_embedding_fn(self):
|
228 |
-
embed_fns = []
|
229 |
-
d = self.kwargs["input_dims"]
|
230 |
-
out_dim = 0
|
231 |
-
if self.kwargs["include_input"]:
|
232 |
-
embed_fns.append(lambda x: x)
|
233 |
-
out_dim += d
|
234 |
-
|
235 |
-
max_freq = self.kwargs["max_freq_log2"]
|
236 |
-
N_freqs = self.kwargs["num_freqs"]
|
237 |
-
|
238 |
-
if self.kwargs["log_sampling"]:
|
239 |
-
freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
|
240 |
-
else:
|
241 |
-
freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)
|
242 |
-
|
243 |
-
for freq in freq_bands:
|
244 |
-
for p_fn in self.kwargs["periodic_fns"]:
|
245 |
-
embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
|
246 |
-
out_dim += d
|
247 |
-
|
248 |
-
self.embed_fns = embed_fns
|
249 |
-
self.out_dim = out_dim
|
250 |
-
|
251 |
-
def embed(self, inputs):
|
252 |
-
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
|
253 |
-
|
254 |
-
|
255 |
-
def get_embedder(multires, i=0):
|
256 |
-
import torch.nn as nn
|
257 |
-
|
258 |
-
if i == -1:
|
259 |
-
return nn.Identity(), 3
|
260 |
-
|
261 |
-
embed_kwargs = {
|
262 |
-
"include_input": True,
|
263 |
-
"input_dims": 3,
|
264 |
-
"max_freq_log2": multires - 1,
|
265 |
-
"num_freqs": multires,
|
266 |
-
"log_sampling": True,
|
267 |
-
"periodic_fns": [torch.sin, torch.cos],
|
268 |
-
}
|
269 |
-
|
270 |
-
embedder_obj = Embedder(**embed_kwargs)
|
271 |
-
embed = lambda x, eo=embedder_obj: eo.embed(x)
|
272 |
-
return embed, embedder_obj.out_dim
|
273 |
-
|
274 |
-
|
275 |
-
class APOPMeter:
|
276 |
-
def __init__(self) -> None:
|
277 |
-
self.tp = 0
|
278 |
-
self.fp = 0
|
279 |
-
self.tn = 0
|
280 |
-
self.fn = 0
|
281 |
-
|
282 |
-
def update(self, pred, gt):
|
283 |
-
"""
|
284 |
-
Input:
|
285 |
-
pred, gt: Tensor()
|
286 |
-
"""
|
287 |
-
assert pred.shape == gt.shape
|
288 |
-
self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
|
289 |
-
self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
|
290 |
-
self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
|
291 |
-
self.tn += torch.logical_and(pred == 1, gt == 0).sum().item()
|
292 |
-
|
293 |
-
def update_cm(self, tp, fp, tn, fn):
|
294 |
-
self.tp += tp
|
295 |
-
self.fp += fp
|
296 |
-
self.tn += tn
|
297 |
-
self.tn += fn
|
298 |
-
|
299 |
-
|
300 |
-
def inverse_sigmoid(x, eps=1e-5):
|
301 |
-
x = x.clamp(min=0, max=1)
|
302 |
-
x1 = x.clamp(min=eps)
|
303 |
-
x2 = (1 - x).clamp(min=eps)
|
304 |
-
return torch.log(x1 / x2)
|
305 |
-
|
306 |
-
|
307 |
-
def get_raw_dict(args):
|
308 |
-
"""
|
309 |
-
return the dicf contained in args.
|
310 |
-
|
311 |
-
e.g:
|
312 |
-
>>> with open(path, 'w') as f:
|
313 |
-
json.dump(get_raw_dict(args), f, indent=2)
|
314 |
-
"""
|
315 |
-
if isinstance(args, argparse.Namespace):
|
316 |
-
return vars(args)
|
317 |
-
elif isinstance(args, dict):
|
318 |
-
return args
|
319 |
-
elif isinstance(args, SLConfig):
|
320 |
-
return args._cfg_dict
|
321 |
-
else:
|
322 |
-
raise NotImplementedError("Unknown type {}".format(type(args)))
|
323 |
-
|
324 |
-
|
325 |
-
def stat_tensors(tensor):
|
326 |
-
assert tensor.dim() == 1
|
327 |
-
tensor_sm = tensor.softmax(0)
|
328 |
-
entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()
|
329 |
-
|
330 |
-
return {
|
331 |
-
"max": tensor.max(),
|
332 |
-
"min": tensor.min(),
|
333 |
-
"mean": tensor.mean(),
|
334 |
-
"var": tensor.var(),
|
335 |
-
"std": tensor.var() ** 0.5,
|
336 |
-
"entropy": entropy,
|
337 |
-
}
|
338 |
-
|
339 |
-
|
340 |
-
class NiceRepr:
|
341 |
-
"""Inherit from this class and define ``__nice__`` to "nicely" print your
|
342 |
-
objects.
|
343 |
-
|
344 |
-
Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
|
345 |
-
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
|
346 |
-
If the inheriting class has a ``__len__``, method then the default
|
347 |
-
``__nice__`` method will return its length.
|
348 |
-
|
349 |
-
Example:
|
350 |
-
>>> class Foo(NiceRepr):
|
351 |
-
... def __nice__(self):
|
352 |
-
... return 'info'
|
353 |
-
>>> foo = Foo()
|
354 |
-
>>> assert str(foo) == '<Foo(info)>'
|
355 |
-
>>> assert repr(foo).startswith('<Foo(info) at ')
|
356 |
-
|
357 |
-
Example:
|
358 |
-
>>> class Bar(NiceRepr):
|
359 |
-
... pass
|
360 |
-
>>> bar = Bar()
|
361 |
-
>>> import pytest
|
362 |
-
>>> with pytest.warns(None) as record:
|
363 |
-
>>> assert 'object at' in str(bar)
|
364 |
-
>>> assert 'object at' in repr(bar)
|
365 |
-
|
366 |
-
Example:
|
367 |
-
>>> class Baz(NiceRepr):
|
368 |
-
... def __len__(self):
|
369 |
-
... return 5
|
370 |
-
>>> baz = Baz()
|
371 |
-
>>> assert str(baz) == '<Baz(5)>'
|
372 |
-
"""
|
373 |
-
|
374 |
-
def __nice__(self):
|
375 |
-
"""str: a "nice" summary string describing this module"""
|
376 |
-
if hasattr(self, "__len__"):
|
377 |
-
# It is a common pattern for objects to use __len__ in __nice__
|
378 |
-
# As a convenience we define a default __nice__ for these objects
|
379 |
-
return str(len(self))
|
380 |
-
else:
|
381 |
-
# In all other cases force the subclass to overload __nice__
|
382 |
-
raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")
|
383 |
-
|
384 |
-
def __repr__(self):
|
385 |
-
"""str: the string of the module"""
|
386 |
-
try:
|
387 |
-
nice = self.__nice__()
|
388 |
-
classname = self.__class__.__name__
|
389 |
-
return f"<{classname}({nice}) at {hex(id(self))}>"
|
390 |
-
except NotImplementedError as ex:
|
391 |
-
warnings.warn(str(ex), category=RuntimeWarning)
|
392 |
-
return object.__repr__(self)
|
393 |
-
|
394 |
-
def __str__(self):
|
395 |
-
"""str: the string of the module"""
|
396 |
-
try:
|
397 |
-
classname = self.__class__.__name__
|
398 |
-
nice = self.__nice__()
|
399 |
-
return f"<{classname}({nice})>"
|
400 |
-
except NotImplementedError as ex:
|
401 |
-
warnings.warn(str(ex), category=RuntimeWarning)
|
402 |
-
return object.__repr__(self)
|
403 |
-
|
404 |
-
|
405 |
-
def ensure_rng(rng=None):
|
406 |
-
"""Coerces input into a random number generator.
|
407 |
-
|
408 |
-
If the input is None, then a global random state is returned.
|
409 |
-
|
410 |
-
If the input is a numeric value, then that is used as a seed to construct a
|
411 |
-
random state. Otherwise the input is returned as-is.
|
412 |
-
|
413 |
-
Adapted from [1]_.
|
414 |
-
|
415 |
-
Args:
|
416 |
-
rng (int | numpy.random.RandomState | None):
|
417 |
-
if None, then defaults to the global rng. Otherwise this can be an
|
418 |
-
integer or a RandomState class
|
419 |
-
Returns:
|
420 |
-
(numpy.random.RandomState) : rng -
|
421 |
-
a numpy random number generator
|
422 |
-
|
423 |
-
References:
|
424 |
-
.. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
|
425 |
-
"""
|
426 |
-
|
427 |
-
if rng is None:
|
428 |
-
rng = np.random.mtrand._rand
|
429 |
-
elif isinstance(rng, int):
|
430 |
-
rng = np.random.RandomState(rng)
|
431 |
-
else:
|
432 |
-
rng = rng
|
433 |
-
return rng
|
434 |
-
|
435 |
-
|
436 |
-
def random_boxes(num=1, scale=1, rng=None):
|
437 |
-
"""Simple version of ``kwimage.Boxes.random``
|
438 |
-
|
439 |
-
Returns:
|
440 |
-
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
|
441 |
-
|
442 |
-
References:
|
443 |
-
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
|
444 |
-
|
445 |
-
Example:
|
446 |
-
>>> num = 3
|
447 |
-
>>> scale = 512
|
448 |
-
>>> rng = 0
|
449 |
-
>>> boxes = random_boxes(num, scale, rng)
|
450 |
-
>>> print(boxes)
|
451 |
-
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
|
452 |
-
[216.9113, 330.6978, 224.0446, 456.5878],
|
453 |
-
[405.3632, 196.3221, 493.3953, 270.7942]])
|
454 |
-
"""
|
455 |
-
rng = ensure_rng(rng)
|
456 |
-
|
457 |
-
tlbr = rng.rand(num, 4).astype(np.float32)
|
458 |
-
|
459 |
-
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
|
460 |
-
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
|
461 |
-
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
|
462 |
-
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
|
463 |
-
|
464 |
-
tlbr[:, 0] = tl_x * scale
|
465 |
-
tlbr[:, 1] = tl_y * scale
|
466 |
-
tlbr[:, 2] = br_x * scale
|
467 |
-
tlbr[:, 3] = br_y * scale
|
468 |
-
|
469 |
-
boxes = torch.from_numpy(tlbr)
|
470 |
-
return boxes
|
471 |
-
|
472 |
-
|
473 |
class ModelEma(torch.nn.Module):
    def __init__(self, model, decay=0.9997, device=None):
        super(ModelEma, self).__init__()
        # make a copy of the model for accumulating the moving average of weights
        self.module = deepcopy(model)
        self.module.eval()

        self.decay = decay
        self.device = device  # perform EMA on a different device from the model if set
        if self.device is not None:
            self.module.to(device=device)

    def _update(self, model, update_fn):
        with torch.no_grad():
            for ema_v, model_v in zip(
                self.module.state_dict().values(), model.state_dict().values()
            ):
                if self.device is not None:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(update_fn(ema_v, model_v))

    def update(self, model):
        self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)

    def set(self, model):
        self._update(model, update_fn=lambda e, m: m)

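
A minimal training-loop sketch showing how ModelEma is meant to be driven (the toy model, data, and optimizer below are illustrative placeholders, not part of this module):

model = torch.nn.Linear(10, 2)
ema = ModelEma(model, decay=0.999)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for step in range(100):
    x, y = torch.randn(4, 10), torch.randint(0, 2, (4,))
    loss = torch.nn.functional.cross_entropy(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model)  # EMA weights track the live model after each step
# evaluate with ema.module rather than model to use the smoothed weights
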
class BestMetricSingle:
    def __init__(self, init_res=0.0, better="large") -> None:
        self.init_res = init_res
        self.best_res = init_res
        self.best_ep = -1

        self.better = better
        assert better in ["large", "small"]

    def isbetter(self, new_res, old_res):
        if self.better == "large":
            return new_res > old_res
        if self.better == "small":
            return new_res < old_res

    def update(self, new_res, ep):
        if self.isbetter(new_res, self.best_res):
            self.best_res = new_res
            self.best_ep = ep
            return True
        return False

    def __str__(self) -> str:
        return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)

    def __repr__(self) -> str:
        return self.__str__()

    def summary(self) -> dict:
        return {
            "best_res": self.best_res,
            "best_ep": self.best_ep,
        }

class BestMetricHolder:
    def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
        self.best_all = BestMetricSingle(init_res, better)
        self.use_ema = use_ema
        if use_ema:
            self.best_ema = BestMetricSingle(init_res, better)
            self.best_regular = BestMetricSingle(init_res, better)

    def update(self, new_res, epoch, is_ema=False):
        """Return True if the new result is the best seen so far."""
        if not self.use_ema:
            return self.best_all.update(new_res, epoch)
        if is_ema:
            self.best_ema.update(new_res, epoch)
        else:
            self.best_regular.update(new_res, epoch)
        return self.best_all.update(new_res, epoch)

    def summary(self):
        if not self.use_ema:
            return self.best_all.summary()

        res = {}
        res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
        res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
        res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
        return res

    def __repr__(self) -> str:
        return json.dumps(self.summary(), indent=2)

    def __str__(self) -> str:
        return self.__repr__()

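
A short sketch of the intended bookkeeping (the per-epoch AP numbers are made up for illustration):

holder = BestMetricHolder(better="large", use_ema=True)
for epoch, (regular_ap, ema_ap) in enumerate([(0.31, 0.30), (0.35, 0.36)]):
    holder.update(regular_ap, epoch, is_ema=False)
    is_best = holder.update(ema_ap, epoch, is_ema=True)
print(holder)  # JSON summary with all_/regular_/ema_ best results and epochs
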
def targets_to(targets: List[Dict[str, Any]], device):
    """Moves the target dicts to the given device."""
    # keys whose values are not tensors and therefore cannot be moved
    excluded_keys = [
        "questionId",
        "tokens_positive",
        "strings_positive",
        "tokens",
        "dataset_name",
        "sentence_id",
        "original_img_id",
        "nb_eval",
        "task_id",
        "original_id",
        "token_span",
        "caption",
        "dataset_type",
    ]
    return [
        {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets
    ]

def get_phrases_from_posmap(
    posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer
) -> str:
    assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
    if posmap.dim() == 1:
        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
        token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
        return tokenizer.decode(token_ids)
    else:
        raise NotImplementedError("posmap must be 1-dim")
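
A hedged usage sketch: recovering the phrase behind a predicted box. The caption, the tokenizer checkpoint, and the way posmap is built below are assumptions based on how Grounding-DINO-style models use this helper, not something this file defines:

from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenized = tokenizer("a cat sitting on a chair")
posmap = torch.zeros(len(tokenized["input_ids"]), dtype=torch.bool)
posmap[1:3] = True  # pretend the model matched tokens 1-2 to a box
print(get_phrases_from_posmap(posmap, tokenized, tokenizer))  # -> "a cat"
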
spaces/ChandraMohanNayal/AutoGPT/ui/api.py
DELETED
@@ -1,146 +0,0 @@
import os
import sys
import json
import subprocess
import time
import uuid

import utils

FILE_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_DIR = os.path.dirname(FILE_DIR)
STATE_DIR = os.path.join(FILE_DIR, "state")
sys.path.append(REPO_DIR)
if not os.path.exists(STATE_DIR):
    os.mkdir(STATE_DIR)


def get_openai_api_key():
    return os.getenv("OPENAI_API_KEY")


running_apis = []


def get_state(state_file):
    with open(state_file, "r") as f:
        state = json.load(f)
    return state


def set_state(state_file, state):
    with open(state_file, "w") as f:
        json.dump(state, f)


class AutoAPI:
    def __init__(self, openai_key, ai_name, ai_role, top_5_goals):
        self.openai_key = openai_key
        run_hex = uuid.uuid4().hex  # renamed from ``hex`` to avoid shadowing the builtin
        print(run_hex)
        self.state_file = os.path.join(STATE_DIR, f"state_{run_hex}.json")
        self.log_file = os.path.join(STATE_DIR, f"log_{run_hex}.json")

        newline = "\n"
        with open(os.path.join(REPO_DIR, "ai_settings.yaml"), "w") as f:
            f.write(
                f"""ai_goals:
{newline.join([f'- {goal[0]}' for goal in top_5_goals if goal[0]])}
ai_name: {ai_name}
ai_role: {ai_role}
"""
            )
        state = {
            "pending_input": None,
            "awaiting_input": False,
            "messages": [],
            "last_message_read_index": -1,
        }
        set_state(self.state_file, state)

        # Launch this same script as a child process; the child inherits the log
        # file descriptor, so closing the handle here does not cut off its output.
        with open(self.log_file, "w") as f:
            subprocess.Popen(
                [
                    "python",
                    os.path.join(REPO_DIR, "ui", "api.py"),
                    openai_key,
                    self.state_file,
                ],
                cwd=REPO_DIR,
                stdout=f,
                stderr=f,
            )

    def send_message(self, message="Y"):
        state = get_state(self.state_file)
        state["pending_input"] = message
        state["awaiting_input"] = False
        set_state(self.state_file, state)

    def get_chatbot_response(self):
        while True:
            state = get_state(self.state_file)
            if (
                state["awaiting_input"]
                and state["last_message_read_index"] >= len(state["messages"]) - 1
            ):
                break
            if state["last_message_read_index"] >= len(state["messages"]) - 1:
                time.sleep(1)
            else:
                state["last_message_read_index"] += 1
                title, content = state["messages"][state["last_message_read_index"]]
                yield (f"**{title.strip()}** " if title else "") + utils.remove_color(
                    content
                ).replace("\n", "<br />")
                set_state(self.state_file, state)


if __name__ == "__main__":
    print(sys.argv)
    _, openai_key, state_file = sys.argv
    os.environ["OPENAI_API_KEY"] = openai_key
    import autogpt.config.config
    from autogpt.logs import logger
    from autogpt.cli import main
    import autogpt.utils
    from autogpt.spinner import Spinner

    def add_message(title, content):
        state = get_state(state_file)
        state["messages"].append((title, content))
        set_state(state_file, state)

    def typewriter_log(title="", title_color="", content="", *args, **kwargs):
        add_message(title, content)

    def warn(message, title="", *args, **kwargs):
        add_message(title, message)

    def error(title, message="", *args, **kwargs):
        add_message(title, message)

    def clean_input(prompt=""):
        add_message(None, prompt)
        state = get_state(state_file)
        state["awaiting_input"] = True
        set_state(state_file, state)
        while state["pending_input"] is None:
            state = get_state(state_file)
            print("Waiting for input...")
            time.sleep(1)
        print("Got input")
        pending_input = state["pending_input"]
        state["pending_input"] = None
        set_state(state_file, state)
        return pending_input

    def spinner_start():
        add_message(None, "Thinking...")

    # Route Auto-GPT's console I/O through the shared state file so the UI
    # process can render it.
    logger.typewriter_log = typewriter_log
    logger.warn = warn
    logger.error = error
    autogpt.utils.clean_input = clean_input
    Spinner.spin = spinner_start

    sys.argv = sys.argv[:1]
    main()
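
A hypothetical driver for the AutoAPI class above (the key, name, role, and goals are placeholder values; in the real app the Gradio UI supplies these from its form fields):

api = AutoAPI(
    openai_key="sk-...",
    ai_name="ResearchGPT",
    ai_role="an agent that summarizes papers",
    top_5_goals=[("find recent papers",), ("summarize them",)],
)
for chunk in api.get_chatbot_response():
    print(chunk)       # streamed agent output, one message at a time
api.send_message("Y")  # authorize the agent's next action
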
spaces/Chintan-Donda/KKMS-KSSW-HF/src/mandi_price.py
DELETED
@@ -1,33 +0,0 @@
import requests


class MANDI_PRICE:
    def __init__(self):
        self.base_url = "https://enam.gov.in/web/Ajax_ctrl/trade_data_list"
        # Other candidate endpoints seen on the site:
        # "https://enam.gov.in/web/dashboard/trade-data"
        # "https://enam.gov.in/web/dashboard/trade_data_list"

    def get_mandi_price(self, state_name, apmc_name, commodity_name, from_date, to_date):
        # Prepare the form-encoded payload for the POST request
        payload = f"language=en&stateName={state_name}&apmcName={apmc_name}&commodityName={commodity_name}&fromDate={from_date}&toDate={to_date}"

        headers = {
            "Content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Referer": "https://enam.gov.in/web/dashboard/trade-data",
            "Accept": "application/json, text/javascript, */*; q=0.01",
        }

        # The payload is already urlencoded, so send it as the raw request body
        # (data=), not re-serialized as JSON (json=), to match the Content-type.
        response = requests.post(
            self.base_url,
            data=payload,
            headers=headers,
        )

        return response.json()
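
An illustrative call (the argument formats are assumptions based on the eNAM dashboard form fields; the exact values the live endpoint accepts are not documented in this file):

client = MANDI_PRICE()
prices = client.get_mandi_price(
    state_name="MAHARASHTRA",
    apmc_name="Pune",
    commodity_name="Onion",
    from_date="2023-05-01",
    to_date="2023-05-07",
)
print(prices)
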