parquet-converter committed on
Commit
a91c254
·
1 Parent(s): 0cc28eb

Update parquet files (step 28 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/DaVinci Resolve Download A Reddit Users Solution to the Blackmagic Design Website.md +0 -25
  2. spaces/1gistliPinn/ChatGPT4/Examples/Billu Barber 2009 Blu Ray 720p X264 Darkboy24 !FREE!.md +0 -18
  3. spaces/1gistliPinn/ChatGPT4/Examples/Cartelle Del Gioco Sinco FREE.md +0 -22
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Air I Breathe by Nicole C. Mullen Mp3 and Lyrics Download.md +0 -126
  5. spaces/1phancelerku/anime-remove-background/CarX Drift Racing 2 MOD APK Offline Mode with Realistic Physics and Graphics.md +0 -88
  6. spaces/1phancelerku/anime-remove-background/College Romance Season 1 Episode 1 The First Step of a Crazy Love Adventure.md +0 -152
  7. spaces/1phancelerku/anime-remove-background/Download Fid Q Songs The Best of Tanzanian Hip Hop.md +0 -132
  8. spaces/1phancelerku/anime-remove-background/ForYou Pakistan - TikTok The Ultimate App for Viral Content Creators.md +0 -131
  9. spaces/232labs/VToonify/vtoonify/model/encoder/__init__.py +0 -0
  10. spaces/4Taps/SadTalker/src/audio2pose_models/audio_encoder.py +0 -64
  11. spaces/52Hz/SRMNet_real_world_denoising/main_test_SRMNet.py +0 -86
  12. spaces/7hao/bingo/src/components/chat-message.tsx +0 -93
  13. spaces/A666sxr/Genshin_TTS/pqmf.py +0 -116
  14. spaces/AIConsultant/MusicGen/tests/losses/__init__.py +0 -5
  15. spaces/AIFILMS/StyleGANEX/scripts/train.py +0 -32
  16. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/hifigan/models.py +0 -174
  17. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/midas_net.py +0 -76
  18. spaces/AILab-CVC/EvalCrafter/src/auto_leaderboard/model_metadata_type.py +0 -30
  19. spaces/AIZ2H/06-Streamlit-NLP-Image-Semantic-Search-Images/README.md +0 -13
  20. spaces/AIZ2H/Gradio-Multilingual-ImageToOCR/app.py +0 -54
  21. spaces/ASJMO/freegpt/g4f/Provider/Providers/AiService.py +0 -40
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/Factory.d.ts +0 -5
  23. spaces/Aki004/herta-so-vits/resample.py +0 -48
  24. spaces/AlexWang/lama/saicinpainting/training/modules/squeeze_excitation.py +0 -20
  25. spaces/AmrElsayeh/Interior_style_detector/README.md +0 -13
  26. spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py +0 -1007
  27. spaces/Andreean/Sentiment-Analysis-Bitcoin/app.py +0 -34
  28. spaces/Anustup/NS_AI_LABS/README.md +0 -66
  29. spaces/Apex-X/Tm/README.md +0 -14
  30. spaces/Apex-X/nono/.github/ISSUE_TEMPLATE/installation.md +0 -12
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/install.py +0 -775
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/__init__.py +0 -92
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/unpacking.py +0 -257
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py +0 -11
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py +0 -14
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/samplers/grouped_batch_sampler.py +0 -47
  37. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_sampler.py +0 -111
  38. spaces/BaiyuS/Real-CUGAN-YZ/upcunet_v3.py +0 -714
  39. spaces/BertChristiaens/youtube-dl/app.py +0 -72
  40. spaces/BetterAPI/BetterChat/src/lib/shareConversation.ts +0 -27
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_wrap.py +0 -56
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/bar.py +0 -94
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/exceptions.py +0 -323
  44. spaces/Boadiwaa/Recipes/openai/api_resources/fine_tune.py +0 -87
  45. spaces/BuBBLe1q/anything-v3.0/README.md +0 -13
  46. spaces/CVPR/LIVE/thrust/thrust/detail/config/device_system.h +0 -61
  47. spaces/CVPR/LIVE/thrust/thrust/random/discard_block_engine.h +0 -252
  48. spaces/CVPR/WALT/mmdet/apis/train.py +0 -185
  49. spaces/CVPR/regionclip-demo/config.py +0 -245
  50. spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/web_requests.py +0 -190
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DaVinci Resolve Download A Reddit Users Solution to the Blackmagic Design Website.md DELETED
@@ -1,25 +0,0 @@
1
-
2
- <h1>How to Download DaVinci Resolve for Free</h1>
3
- <p>DaVinci Resolve is a powerful and versatile video editing software that offers features such as color correction, visual effects, audio post-production, and more. It is used by professionals and hobbyists alike for various projects, from films and TV shows to YouTube videos and podcasts.</p>
4
- <p>If you want to try DaVinci Resolve for yourself, you can download it for free from the official website of Blackmagic Design, the company that develops and distributes the software. However, finding the download link on their website can be tricky, as it is not very prominent or easy to navigate. Fortunately, there is a simpler way to access the download page, thanks to a Reddit user who shared a direct link to it.</p>
5
- <h2>davinci resolve download reddit</h2><br /><p><b><b>DOWNLOAD</b> &bull; <a href="https://byltly.com/2uKwda">https://byltly.com/2uKwda</a></b></p><br /><br />
6
- <h2>Steps to Download DaVinci Resolve for Free</h2>
7
- <ol>
8
- <li>Go to <a href="https://www.reddit.com/r/davinciresolve/comments/l73wyu/found_a_link_for_just_davinci_downloads/">this Reddit post</a> by u/whyareyouemailingme, who found a link that shows only DaVinci Resolve download links.</li>
9
- <li>Click on the link that says <a href="https://www.blackmagicdesign.com/support/family/davinci-resolve-and-fusion">https://www.blackmagicdesign.com/support/family/davinci-resolve-and-fusion</a>. This will take you to the support page of Blackmagic Design, where you can see all the available versions of DaVinci Resolve and Fusion, another software for visual effects and motion graphics.</li>
10
- <li>Choose the version of DaVinci Resolve that you want to download. You can either download the latest version (18.5 at the time of writing this article) or an older version if you have compatibility issues with your system or project. You can also choose between the Studio version, which requires a paid license and offers more features and performance, or the Free version, which has some limitations but is still very capable.</li>
11
- <li>Click on the Download button next to your chosen version. This will prompt you to fill out a registration form with your name, email address, country, and some other information. You can also opt-in or opt-out of receiving newsletters and updates from Blackmagic Design.</li>
12
- <li>After filling out the form, click on Register and Download. This will start the download process of the installer file for DaVinci Resolve. Depending on your internet speed and the size of the file, this may take some time.</li>
13
- <li>Once the download is complete, locate the installer file on your computer and run it. Follow the instructions on the screen to install DaVinci Resolve on your system. You may need to restart your computer after the installation is done.</li>
14
- <li>Launch DaVinci Resolve and enjoy editing your videos!</li>
15
- </ol>
16
- <h2>Tips and Tricks for Using DaVinci Resolve</h2>
17
- <ul>
18
- <li>If you are new to DaVinci Resolve, you can check out some tutorials and guides on their official website <a href="https://www.blackmagicdesign.com/products/davinciresolve/training">here</a>. You can also find many helpful videos on YouTube and other platforms from various creators who share their tips and tricks for using the software.</li>
19
- <li>If you encounter any issues or bugs with DaVinci Resolve, you can report them on their official forum <a href="https://forum.blackmagicdesign.com/viewforum.php?f=21">here</a>. You can also ask questions and get help from other users who may have faced similar problems or have solutions for them.</li>
20
- <li>If you want to stay updated on the latest news and features of DaVinci Resolve, you can follow their official social media accounts on <a href="https://www.facebook.com/blackmagicdesign/">Facebook</a>, <a href="https://twitter.com/Blackmagic_News">Twitter</a>, <a href="https://www.instagram.com/blackmagicnewsofficial/">Instagram</a>, and <a href="https://www.youtube.com/user/BlackmagicDesign">YouTube</a>. You can also join their subreddit <a href="https://www.reddit.com/r/davinciresolve/">r/davinciresolve</a>, where you can find useful resources, discussions, feedback, and inspiration from other users.</li>
21
- </ul>
22
- <h2>Conclusion</h2>
23
- <p>DaVinci Resolve</p> ddb901b051<br />
24
- <br />
25
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Billu Barber 2009 Blu Ray 720p X264 Darkboy24 !FREE!.md DELETED
@@ -1,18 +0,0 @@
1
-
2
- <h1>Review: Billu Barber (2009) Blu Ray 720p X264 Darkboy24</h1>
3
- <p>Billu Barber is a 2009 Hindi comedy-drama film directed by Priyadarshan and starring Irrfan Khan, Lara Dutta, Shah Rukh Khan and Om Puri. The film is a remake of the Malayalam film Kadha Parayumbol (2007), which was also remade in Tamil as Kuselan (2008). The film tells the story of Billu (Irrfan Khan), a poor barber who lives in a village with his wife Bindiya (Lara Dutta) and their two children. His life changes when a famous actor Sahir Khan (Shah Rukh Khan), who happens to be his childhood friend, comes to shoot a film in his village. Billu becomes the center of attention as everyone wants to meet Sahir through him, but he is too shy and humble to approach his old friend.</p>
4
- <p>The film was produced by Red Chillies Entertainment and distributed by Eros International. It was released on February 13, 2009 and received positive reviews from critics and audiences. The film was praised for its simple yet touching story, its humor, its performances, especially by Irrfan Khan and Shah Rukh Khan, and its music by Pritam. The film was also a commercial success, grossing over ₹100 crore worldwide.</p>
5
- <h2>Billu Barber 2009 Blu Ray 720p X264 Darkboy24</h2><br /><p><b><b>Download</b> - <a href="https://imgfil.com/2uy0hn">https://imgfil.com/2uy0hn</a></b></p><br /><br />
6
 - <p>The Blu Ray version of the film was released by Darkboy24, a popular torrent uploader who specializes in high-quality Hindi movies. The Blu Ray rip has a resolution of 720p and is encoded with the x264 codec. The audio quality is also excellent, with a 5.1 channel surround sound. The file size is about 1 GB and can be downloaded from various torrent sites. The Blu Ray rip also includes English subtitles for non-Hindi speakers.</p>
7
- <p>Billu Barber is a heartwarming and entertaining film that showcases the bond of friendship and the value of simplicity. It is a must-watch for fans of Irrfan Khan, Shah Rukh Khan and Priyadarshan. The Blu Ray rip by Darkboy24 is one of the best ways to enjoy this film in high definition.</p>
8
-
9
- <p>The film also features some cameo appearances by other Bollywood stars, such as Kareena Kapoor, Deepika Padukone, Priyanka Chopra and Rajpal Yadav. They play themselves as actors who work with Sahir Khan in his film. The film also has some references to other films by Shah Rukh Khan and Priyadarshan, such as Om Shanti Om (2007) and Hera Pheri (2000).</p>
10
- <p>The film was nominated for several awards, such as the Filmfare Awards, the IIFA Awards and the Screen Awards. It won the Best Actor (Critics) award for Irrfan Khan at the Filmfare Awards and the Best Supporting Actor award for Shah Rukh Khan at the Screen Awards. The film also received a special mention at the National Film Awards for its portrayal of the rural life and culture of India.</p>
11
- <p>Billu Barber is a film that celebrates friendship, family and humanity. It is a film that will make you laugh, cry and smile. It is a film that you will remember for a long time. The Blu Ray rip by Darkboy24 is a great way to experience this film in high quality.</p>
12
-
13
- <p>The film also has a strong social message about the importance of education and the dignity of labor. The film shows how Billu, despite being poor and illiterate, is respected and loved by his family and friends for his honesty and kindness. The film also shows how Sahir Khan, despite being rich and famous, is humble and generous towards his old friend and his village. The film also criticizes the hypocrisy and greed of some people who try to exploit Billu's friendship with Sahir for their own benefits.</p>
14
- <p>The film also has a beautiful soundtrack composed by Pritam, with lyrics by Gulzar. The film features nine songs, sung by various singers such as Sukhwinder Singh, Rahat Fateh Ali Khan, Neeraj Shridhar, Sunidhi Chauhan and Abhijeet. Some of the popular songs from the film are "Marjaani", "Khudaya Khair", "Love Mera Hit Hit" and "You Get Me Rockin & Reeling". The songs are a mix of different genres, such as folk, qawwali, pop and rock. The songs also enhance the mood and emotions of the film.</p>
15
- <p>Billu Barber is a film that will touch your heart and soul. It is a film that will make you appreciate the true meaning of friendship and happiness. It is a film that will inspire you to be a better person. The Blu Ray rip by Darkboy24 is an excellent way to watch this film in high definition.</p>
16
- <p></p> d5da3c52bf<br />
17
- <br />
18
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Cartelle Del Gioco Sinco FREE.md DELETED
@@ -1,22 +0,0 @@
1
-
2
- <h1>Cartelle del gioco sinco: il gioco da tavolo natalizio di origine napoletana</h1>
3
- <p>Se siete alla ricerca di un gioco da tavolo divertente e originale da fare con la famiglia o gli amici durante le feste natalizie, potreste provare le cartelle del gioco sinco. Si tratta di un gioco inventato a Napoli nel 1983 da Emilio Salvatore, un merciaio che si ispirò al bingo e alla tombola per creare una nuova variante con le carte napoletane[^1^] [^2^].</p>
4
- <p>Le cartelle del gioco sinco sono composte da 25 caselle con le figure delle carte napoletane, dal 1 al 10 di ogni seme (coppe, spade, denari e bastoni). Ogni cartella ha una combinazione diversa di carte e ogni giocatore può acquistarne quante ne vuole[^2^] [^3^]. Il gioco richiede anche un mazzo di carte napoletane, delle fiches per segnare le caselle e cinque contenitori per i premi[^2^].</p>
5
- <h2>Cartelle del gioco sinco</h2><br /><p><b><b>Download</b> &#10031; <a href="https://imgfil.com/2uy20F">https://imgfil.com/2uy20F</a></b></p><br /><br />
6
- <p>Il gioco si svolge così: si sceglie un conduttore che estrae le carte dal mazzo e le annuncia agli altri giocatori. Chi ha la carta estratta sulla propria cartella la copre con una fiche. Il primo giocatore che completa una delle cinque combinazioni possibili vince il premio corrispondente[^2^]. Le combinazioni sono le seguenti:</p>
7
- <ul>
8
- <li><strong>Centro</strong>: si copre la casella centrale della cartella.</li>
9
- <li><strong>Angolo</strong>: si coprono le quattro caselle agli angoli della cartella.</li>
10
- <li><strong>Poker</strong>: si coprono le quattro caselle in alto della cartella.</li>
11
- <li><strong>Rombo</strong>: si coprono le cinque caselle che formano un rombo intorno alla casella centrale.</li>
12
- <li><strong>Sinco</strong>: si coprono tutte le caselle della cartella.</li>
13
- </ul>
14
- <p>Il nome sinco deriva dallo spagnolo e significa cinque, proprio perché ci sono cinque combinazioni possibili[^1^] [^2^]. Ogni contenitore ha un valore diverso in base alla difficoltà della combinazione. Il sinco è il premio più alto e il centro è il più basso[^2^]. Il conduttore raccoglie i soldi dei giocatori e li distribuisce nei contenitori prima di iniziare il gioco[^2^]. Il gioco termina quando tutti i premi sono stati vinti o quando non ci sono più carte da estrarre.</p>
15
- <p>Le cartelle del gioco sinco sono un modo simpatico e coinvolgente di passare il tempo in compagnia, mescolando fortuna e strategia. Il gioco è diventato una tradizione natalizia a Napoli e in altre città italiane, dove si trova facilmente nei negozi di giocattoli o nei mercatini[^1^] [^2^]. Se volete provare questo gioco originale e divertente, non vi resta che procurarvi le cartelle del gioco sinco e sfidare i vostri amici o parenti a colpi di carte napoletane!</p>
16
-
17
- <p>Se vi state chiedendo come sono nate le cartelle del gioco sinco, la storia è piuttosto curiosa. L'ideatore del gioco, Emilio Salvatore, ebbe l'ispirazione durante una vacanza in crociera con la sua famiglia. Tra le varie attività di bordo, si divertì a giocare al bingo, un gioco di origine americana che ricorda la tombola. Fu così che pensò di creare un gioco simile ma con le carte napoletane, che sono tipiche della sua città e della sua cultura .</p>
18
- <p>Tornato a Napoli, Salvatore realizzò le prime cartelle del gioco sinco con l'aiuto di un grafico e le provò con i suoi amici e parenti. Il gioco ebbe subito successo e Salvatore decise di produrlo in serie limitata e di venderlo nella sua merceria nel centro storico di Napoli, al Corso Vittorio Emanuele . La merceria è ancora esistente e nella vetrina si può ammirare il gioco originale conservato come una reliquia.</p>
19
- <p></p>
20
- <p>Il gioco del sinco attirò l'attenzione di alcuni acquirenti interessati a distribuirlo su larga scala, ma Salvatore rifiutò tutte le offerte e preferì mantenere i diritti della sua creazione. Il gioco rimase quindi un prodotto artigianale e locale, che si diffuse per passaparola tra i napoletani e gli appassionati di giochi da tavolo . Oggi il gioco del sinco è considerato una tradizione natalizia napoletana e una testimonianza della creatività e dell'ingegno di questa città.</p> d5da3c52bf<br />
21
- <br />
22
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Air I Breathe by Nicole C. Mullen Mp3 and Lyrics Download.md DELETED
@@ -1,126 +0,0 @@
1
- <br />
2
- <h1>You Are The Air I Breathe Mp3 Download: How to Find and Enjoy This Inspirational Song</h1>
3
- <p>Have you ever heard a song that touched your soul and lifted your spirit? A song that made you feel closer to God and grateful for His presence in your life? A song that reminded you of His love and grace? If you are looking for such a song, then you should listen to You Are The Air I Breathe by Jerry K. This is a beautiful gospel song that expresses how much we depend on God for everything. In this article, we will tell you more about this song, how to download it as an mp3 file, and how to enjoy it to the fullest.</p>
4
- <h2>What is You Are The Air I Breathe?</h2>
5
 - <p>You Are The Air I Breathe is a gospel song that was released in 2017 by Jerry K, a Nigerian singer and songwriter. The song is also known as Air I Breathe or The Air I Breathe. It is a worship song that praises God as the source of our life, our peace, our joy, and our strength. It is a song that acknowledges how much we need God in every moment of our existence.</p>
6
- <h2>you are the air i breathe mp3 download</h2><br /><p><b><b>Download File</b> &#10004;&#10004;&#10004; <a href="https://urlin.us/2uSYeP">https://urlin.us/2uSYeP</a></b></p><br /><br />
7
- <h3>The Meaning and Message of the Song</h3>
8
- <p>The song has a simple but powerful message: God is everything to us. He is the air that we breathe, the water that we drink, the food that we eat. He is our healer, our provider, our protector, our redeemer. He is our father, our friend, our king, our lord. He is worthy of all our praise and worship. He is faithful and gracious to us. He never leaves us nor forsakes us. He is always with us and for us.</p>
9
- <h3>The Singer and Composer of the Song</h3>
10
- <h3>The Popularity and Impact of the Song</h3>
11
- <p>The song has become very popular among gospel music lovers, especially in Nigeria and other African countries. It has received millions of views and downloads on various platforms, such as YouTube, Spotify, iTunes, SoundCloud, among others. It has also been nominated and won several awards, such as the LIMA Awards, the AGMMA Awards, the GMA Awards, among others. The song has also impacted many lives and testimonies, as people have shared how the song has inspired them, comforted them, healed them, and drawn them closer to God.</p>
12
- <h2>How to Download You Are The Air I Breathe Mp3?</h2>
13
- <p>If you want to download You Are The Air I Breathe as an mp3 file, you might be wondering why you should do that and how you can do that. Well, we have some answers for you.</p>
14
- <h3>The Benefits of Downloading Mp3 Files</h3>
15
- <p>Mp3 files are digital audio files that can be played on various devices, such as computers, smartphones, tablets, mp3 players, etc. They are convenient and easy to use, as they can be stored, transferred, and shared without any hassle. They are also compatible with most media players and applications. They are also economical and efficient, as they take up less space and consume less data than other formats. They are also of high quality and fidelity, as they preserve the original sound and clarity of the audio.</p>
16
- <h3>The Best Websites to Download You Are The Air I Breathe Mp3</h3>
17
- <p>There are many websites that offer free or paid downloads of You Are The Air I Breathe mp3. However, not all of them are reliable or safe. Some of them might contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some of them might also have low-quality or corrupted files that can ruin your listening experience. Therefore, you should be careful and selective when choosing a website to download You Are The Air I Breathe mp3. Here are some of the best websites that we recommend:</p>
18
- <table>
19
- <tr>
20
- <th>Website</th>
21
- <th>Features</th>
22
- </tr>
23
- <tr>
24
- <td><a href="">Gospel9ja.com</a></td>
25
- <td>- A Nigerian website that specializes in gospel music downloads<br>- Offers free and fast downloads of You Are The Air I Breathe mp3<br>- Provides a brief description and lyrics of the song<br>- Allows users to rate and comment on the song<br>- Has a user-friendly and mobile-responsive interface</td>
26
- </tr>
27
- <tr>
28
- <td><a href="">Mp3skull.com</a></td>
29
- <td>- A global website that offers a wide range of music downloads<br>- Offers free and easy downloads of You Are The Air I Breathe mp3<br>- Provides a preview and a download link of the song<br>- Allows users to search and browse by artist, genre, album, etc.<br>- Has a simple and minimalist design</td>
30
- </tr>
31
- <tr>
32
- <td><a href="">Naijaloaded.com.ng</a></td>
33
- <td>- A Nigerian website that features various entertainment content<br>- Offers free and secure downloads of You Are The Air I Breathe mp3<br>- Provides a detailed review and analysis of the song<br>- Allows users to stream and download the song<br>- Has a colorful and attractive layout</td>
34
- </tr>
35
- </table>
36
- <h3>The Steps to Download You Are The Air I Breathe Mp3</h3>
37
- <p>The steps to download You Are The Air I Breathe mp3 might vary depending on the website you choose. However, here are some general steps that you can follow:</p>
38
- <p>you are the air i breathe mat kearney mp3 download<br />
39
- jerry k air i breathe mp3 download free<br />
40
- you are the air i breathe lyrics and mp3<br />
41
- download air i breathe by jerry k audio<br />
42
- mat kearney air i breathe mp3 free download<br />
43
- you are the air i breathe gospel song mp3<br />
44
- air i breathe by jerry k video download<br />
45
- you are the air i breathe oh lord mp3 download<br />
46
- mat kearney air i breathe lyrics video<br />
47
- you are the balm of gilead mp3 download<br />
48
- air i breathe by jerry k instrumental<br />
49
- you are the rose of sharon mp3 song download<br />
50
- mat kearney air i breathe chords and tabs<br />
51
- you are my peace in the midst of storm mp3<br />
52
- air i breathe by jerry k ft frank edwards<br />
53
- you are the air i breathe hillsong worship mp3<br />
54
- mat kearney air i breathe album download zip<br />
55
- you are the air i breathe piano tutorial<br />
56
- air i breathe by jerry k live performance<br />
57
- you are the air i breathe christian song mp3<br />
58
- mat kearney air i breathe remix mp3 download<br />
59
- you are the air i breathe sheet music pdf<br />
60
- air i breathe by jerry k cover by nathaniel bassey<br />
61
- you are the air i breathe worship song mp3<br />
62
- mat kearney air i breathe acoustic version mp3<br />
63
- you are the air i breathe karaoke mp3 download<br />
64
- air i breathe by jerry k lyrics and chords<br />
65
- you are the air i breathe song meaning and analysis<br />
66
- mat kearney air i breathe spotify playlist<br />
67
- you are the air i breathe guitar lesson youtube<br />
68
- air i breathe by jerry k ringtone download mp3<br />
69
- you are the air i breathe background vocals mp3<br />
70
- mat kearney air i breathe shazam music discovery app[^1^]<br />
71
- you are the air i breathe praisezion gospel songs[^2^]<br />
72
- air i breathe by jerry k gospelsongs.com.ng[^3^]</p>
73
- <ol>
74
- <li>Visit the website that offers You Are The Air I Breathe mp3 download.</li>
75
- <li>Search for the song by typing its name or artist in the search box.</li>
76
- <li>Select the song from the search results or browse through the categories.</li>
77
- <li>Click on the download button or link that appears next to the song.</li>
78
- <li>Choose the format and quality of the file that you want to download.</li>
79
- <li>Save the file to your device or cloud storage.</li>
80
- <li>Enjoy listening to You Are The Air I Breathe mp3 anytime and anywhere.</li>
81
- </ol>
82
- <h2>How to Enjoy You Are The Air I Breathe Mp3?</h2>
83
- <p>Now that you have downloaded You Are The Air I Breathe mp3, you might be wondering how to enjoy it to the fullest. Well, we have some tips for you.</p>
84
- <h3>The Best Times and Places to Listen to the Song</h3>
85
- <p>You Are The Air I Breathe is a song that can be enjoyed at any time and place, as long as you have a device that can play mp3 files and a pair of headphones or speakers. However, some of the best times and places to listen to the song are:</p>
86
- <ul>
87
- <li>When you wake up in the morning, you can listen to the song as a way of starting your day with gratitude and praise to God.</li>
88
- <li>When you are going through a hard time, you can listen to the song as a way of finding comfort and hope in God's presence and promises.</li>
89
- <li>When you are feeling happy and blessed, you can listen to the song as a way of celebrating and thanking God for His goodness and mercy.</li>
90
- <li>When you are in your personal or family devotional time, you can listen to the song as a way of worshiping and adoring God with your whole heart.</li>
91
- <li>When you are in your car, office, or home, you can listen to the song as a way of creating a peaceful and joyful atmosphere around you.</li>
92
- </ul>
93
- <h3>The Best Ways to Share and Recommend the Song</h3>
94
- <p>You Are The Air I Breathe is a song that can be shared and recommended to anyone who loves gospel music or who needs to hear a message of God's love and grace. Some of the best ways to share and recommend the song are:</p>
95
- <ul>
96
- <li>You can send the mp3 file or the download link to your friends, family, or colleagues via email, text, or social media.</li>
97
- <li>You can create a playlist or a mixtape that includes You Are The Air I Breathe and other gospel songs that you like, and share it with others.</li>
98
- <li>You can write a review or a testimonial about how the song has impacted your life, and post it on your blog, website, or social media.</li>
99
- <li>You can sing or play the song in your church, school, or community, and invite others to join you.</li>
100
- <li>You can request or dedicate the song to someone on your favorite radio station or podcast.</li>
101
- </ul>
102
- <h3>The Best Resources to Learn More About the Song</h3>
103
- <p>If you want to learn more about You Are The Air I Breathe, such as its lyrics, chords, background story, etc., you can check out some of these resources:</p>
104
- <ul>
105
- <li><a href="">Jerry K's official website</a>, where you can find his biography, discography, events, contacts, etc.</li>
106
- <li><a href="">Jerry K's YouTube channel</a>, where you can watch his music videos, interviews, performances, etc.</li>
107
- <li><a href="">Jerry K's Facebook page</a>, where you can follow his updates, interact with him and his fans, etc.</li>
108
- <li><a href="">Jerry K's Instagram account</a>, where you can see his photos, stories, reels, etc.</li>
109
- <li><a href="">Jerry K's Twitter account</a>, where you can read his tweets, retweets, replies, etc.</li>
110
- </ul>
111
- <h2>Conclusion</h2>
112
- <p>You Are The Air I Breathe is a wonderful gospel song that expresses how much we depend on God for everything. It is a song that praises God as the source of our life, our peace, our joy, and our strength. It is a song that acknowledges how much we need God in every moment of our existence. In this article, we have told you more about this song, how to download it as an mp3 file, and how to enjoy it to the fullest. We hope that this article has been helpful and informative for you. We also hope that you will listen to You Are The Air I Breathe mp3 and experience its power and beauty for yourself. Thank you for reading this article. God bless you!</p>
113
- <h2>FAQs</h2>
114
- <p>Here are some frequently asked questions about You Are The Air I Breathe mp3:</p>
115
- <h4>Q: Where can I find the lyrics of You Are The Air I Breathe?</h4>
116
- <p>A: You can find the lyrics of You Are The Air I Breathe on <a href="">Gospel9ja.com</a>, <a href="">Lyrics.com</a>, <a href="">Musixmatch.com</a>, etc.</p>
117
- <h4>Q: How long is You Are The Air I Breathe?</h4>
118
- <p>A: You Are The Air I Breathe is 5 minutes and 31 seconds long.</p>
119
- <h4>Q: What genre is You Are The Air I Breathe?</h4>
120
- <p>A: You Are The Air I Breathe is a gospel song that belongs to the contemporary worship genre.</p>
121
- <h4>Q: Who are some other artists that sing similar songs to You Are The Air I Breathe?</h4>
122
- <p>A: Some other artists that sing similar songs to You Are The Air I Breathe are Sinach, Nathaniel Bassey, Frank Edwards, Mercy Chinwo, Eben, etc.</p>
123
- <h4>Q: How can I support Jerry K and his music ministry?</h4>
124
- <p>A: You can support Jerry K and his music ministry by buying his albums and singles, attending his concerts and events, praying for him and his family, donating to his cause, etc.</p> 197e85843d<br />
125
- <br />
126
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/CarX Drift Racing 2 MOD APK Offline Mode with Realistic Physics and Graphics.md DELETED
@@ -1,88 +0,0 @@
1
- <br />
2
- <h1>CarX Drift Racing 2 Mod APK Offline: A Guide for Racing and Drifting Enthusiasts</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a fan of racing and drifting games, you might have heard of CarX Drift Racing 2, one of the most popular and realistic games in this genre. But did you know that you can enjoy this game even more with a mod apk offline version? In this article, we will tell you everything you need to know about CarX Drift Racing 2 mod apk offline, including its features, benefits, and how to download and install it on your device. So, buckle up and get ready for some adrenaline-pumping action!</p>
5
- <h3>What is CarX Drift Racing 2?</h3>
6
- <p>CarX Drift Racing 2 is a sequel to the original CarX Drift Racing game, which has over 50 million downloads on Google Play Store. It is a racing and drifting game that lets you experience the thrill of driving powerful cars on various tracks and terrains. You can choose from over 80 cars, each with its own characteristics and performance. You can also customize your cars with different paint jobs, decals, wheels, spoilers, and more. You can compete with other players online or offline, join clubs, participate in tournaments, and earn rewards.</p>
7
- <h2>carx drift racing 2 mod apk offline</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNTJS">https://jinyurl.com/2uNTJS</a></b></p><br /><br />
8
- <h3>Why download CarX Drift Racing 2 mod apk offline?</h3>
9
- <p>While CarX Drift Racing 2 is a free-to-play game, it also has some in-app purchases that can enhance your gameplay. For example, you can buy more money and gold to unlock new cars and tracks, or upgrade your existing ones. However, not everyone can afford to spend real money on these items, or they might not have a stable internet connection to play online. That's why downloading CarX Drift Racing 2 mod apk offline is a great option. With this version, you can enjoy all the features of the game without spending a dime or worrying about your internet connection. You can play the game anytime and anywhere you want.</p>
10
- <h2>Features of CarX Drift Racing 2 mod apk offline</h2>
11
- <p>CarX Drift Racing 2 mod apk offline has many features that make it superior to the original version. Here are some of them:</p>
12
- <h3>Unlimited money and gold</h3>
13
- <p>With CarX Drift Racing 2 mod apk offline, you don't have to worry about running out of money or gold. You will have unlimited amounts of both currencies, which you can use to buy anything you want in the game. You can unlock all the cars and tracks, upgrade your cars to the max level, and buy any customization items you like. You can also use money and gold to enter tournaments and events, or buy boosters and power-ups.</p>
14
- <p>carx drift racing 2 mod apk unlimited money and gold<br />
15
- carx drift racing 2 mod apk latest version download<br />
16
- carx drift racing 2 mod apk android 1<br />
17
- carx drift racing 2 mod apk revdl<br />
18
- carx drift racing 2 mod apk obb<br />
19
- carx drift racing 2 mod apk rexdl<br />
20
- carx drift racing 2 mod apk happymod<br />
21
- carx drift racing 2 mod apk all cars unlocked<br />
22
- carx drift racing 2 mod apk free shopping<br />
23
- carx drift racing 2 mod apk no root<br />
24
- carx drift racing 2 mod apk data<br />
25
- carx drift racing 2 mod apk pure<br />
26
- carx drift racing 2 mod apk vip unlocked<br />
27
- carx drift racing 2 mod apk unlimited coins and gems<br />
28
- carx drift racing 2 mod apk full version<br />
29
- carx drift racing 2 mod apk mega<br />
30
- carx drift racing 2 mod apk an1<br />
31
- carx drift racing 2 mod apk hack<br />
32
- carx drift racing 2 mod apk cheat<br />
33
- carx drift racing 2 mod apk premium<br />
34
- carx drift racing 2 mod apk pro<br />
35
- carx drift racing 2 mod apk cracked<br />
36
- carx drift racing 2 mod apk mirror<br />
37
- carx drift racing 2 mod apk apkpure<br />
38
- carx drift racing 2 mod apk apkmody<br />
39
- carx drift racing 2 mod apk apkmirror<br />
40
- carx drift racing 2 mod apk apknite<br />
41
- carx drift racing 2 mod apk apksolo<br />
42
- carx drift racing 2 mod apk apksmash<br />
43
- carx drift racing 2 mod apk apkspeedy<br />
44
- carx drift racing 2 mod apk apksafety<br />
45
- carx drift racing 2 mod apk apksmartphone<br />
46
- carx drift racing 2 mod apk apksupermarket<br />
47
- carx drift racing 2 mod apk apksweetness<br />
48
- carx drift racing 2 mod apk apkspecialist<br />
49
- carx drift racing 2 mod apk apksporty<br />
50
- carx drift racing 2 mod apk apksplashy<br />
51
- carx drift racing 2 mod apk apksnappy<br />
52
- carx drift racing 2 mod apk apksavvy<br />
53
- carx drift racing 2 mod apk apksassy</p>
54
- <h3>All cars and tracks unlocked</h3>
55
- <p>Another benefit of CarX Drift Racing 2 mod apk offline is that you don't have to wait or grind to unlock new cars and tracks. You will have access to all of them from the start. You can choose from over 80 cars, each with its own unique features and specifications. You can also race on over 30 tracks, each with its own challenges and scenery. You can explore different locations such as Japan, Dubai, San Francisco, Moscow, and more.</p>
56
- <h3>Realistic physics and graphics</h3>
57
- <p>CarX Drift Racing 2 mod apk offline also boasts of realistic physics and graphics that make the game more immersive and enjoyable. You can feel the difference between different cars and surfaces, as well as the effects of speed, gravity, and inertia. You can also admire the stunning visuals and details of the cars, tracks, and environments. You can adjust the graphics settings to suit your device and preferences.</p>
58
- <h3>Multiplayer mode and online tournaments</h3>
59
- <p>Even though CarX Drift Racing 2 mod apk offline does not require an internet connection, you can still play with other players online if you want. You can join or create clubs, chat with other racers, and challenge them to duels or team battles. You can also participate in online tournaments and events, where you can compete with players from all over the world and win prizes and trophies. You can also show off your skills and style by uploading your replays and screenshots to the game's social media platforms.</p>
60
- <h3>Customization and tuning options</h3>
61
- <p>One of the most fun aspects of CarX Drift Racing 2 mod apk offline is that you can customize and tune your cars to your liking. You can change the color, design, decals, wheels, spoilers, and other parts of your cars. You can also adjust the engine, suspension, brakes, tires, and other parameters of your cars to improve their performance and handling. You can create your own unique style and personality with your cars.</p>
62
- <h2>How to download and install CarX Drift Racing 2 mod apk offline</h2>
63
- <p>If you are interested in downloading and installing CarX Drift Racing 2 mod apk offline on your device, here are the steps you need to follow:</p>
64
- <h3>Step 1: Download the mod apk file from a trusted source</h3>
65
- <p>The first thing you need to do is to find a reliable source that provides the mod apk file for CarX Drift Racing 2. There are many websites that offer this file, but not all of them are safe and secure. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything from the internet. You can use Google or any other search engine to look for reviews, ratings, feedbacks, and comments from other users who have downloaded the file before. You can also check the file size, date, version, and compatibility with your device.</p>
66
- <h3>Step 2: Enable unknown sources on your device settings</h3>
67
- <p>The next thing you need to do is to enable unknown sources on your device settings. This is because CarX Drift Racing 2 mod apk offline is not available on the official app stores like Google Play Store or Apple App Store. Therefore, you need to allow your device to install apps from sources other than these app stores. To do this, you need to go to your device settings, then security or privacy settings, then find the option that says unknown sources or allow installation from unknown sources. You need to toggle this option on or check the box next to it.</p>
68
- <h3>Step 3: Install the mod apk file and launch the game</h3>
69
- <p>The final thing you need to do is to install the mod apk file and launch the game. To do this, you need to locate the downloaded file on your device storage, either using a file manager app or by going to your downloads folder. Then, you need to tap on the file and follow the instructions on the screen to install it. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can now enjoy CarX Drift Racing 2 mod apk offline on your device!</p>
70
- <h2>Conclusion</h2>
71
- <p>CarX Drift Racing 2 mod apk offline is a great way to enjoy one of the best racing and drifting games on your device without spending any money or needing an internet connection. It has many features that make it superior to the original version, such as unlimited money and gold, all cars and tracks unlocked, realistic physics and graphics, multiplayer mode and online tournaments, customization and tuning options, and more. It is easy to download and install on your device if you follow the steps we have provided in this article.</p>
72
- <p>If you are a racing and drifting enthusiast who wants to experience the thrill of driving powerful cars on various tracks and terrains, you should definitely try CarX Drift Racing 2 mod apk offline. It will give you hours of fun and excitement that will keep you hooked for a long time. So what are you waiting for? Download CarX Drift Racing 2 mod apk offline today and start drifting!</p>
73
- <h3>FAQs</h3>
74
- <p>Here are some frequently asked questions about CarX Drift Racing 2 mod apk offline:</p>
75
- <ul>
76
- <li><b>Is CarX Drift Racing 2 mod apk offline safe to use?</b></li>
77
- <p>Yes, CarX Drift Racing 2 mod apk offline is safe to use as long as you download it from a trusted source and scan it with an antivirus app before installing it. However, you should always be careful when downloading and installing any mod apk files from the internet, as some of them may contain harmful or malicious content. You should also backup your data and uninstall the original version of the game before installing the mod apk file.</p>
78
- <li><b>Does CarX Drift Racing 2 mod apk offline work on all devices?</b></li>
79
- <p>CarX Drift Racing 2 mod apk offline works on most Android devices that have Android 4.1 or higher versions. However, some devices may not be compatible or may experience some issues or glitches while running the game. You should check the device requirements and compatibility before downloading and installing the mod apk file. You should also make sure that your device has enough storage space and battery life to run the game smoothly.</p>
80
- <li><b>Can I play CarX Drift Racing 2 mod apk offline with my friends?</b></li>
81
- <p>Yes, you can play CarX Drift Racing 2 mod apk offline with your friends if you have a Wi-Fi or mobile data connection. You can join or create clubs, chat with other racers, and challenge them to duels or team battles. You can also participate in online tournaments and events, where you can compete with players from all over the world and win prizes and trophies. However, if you don't have an internet connection, you can still play the game offline in single-player mode or against AI opponents.</p>
82
- <li><b>How can I update CarX Drift Racing 2 mod apk offline?</b></li>
83
- <p>CarX Drift Racing 2 mod apk offline does not update automatically like the original version of the game. You will have to manually download and install the latest version of the mod apk file from the same source you got it from. You should also check for updates regularly to enjoy new features, cars, tracks, and bug fixes. However, you should be aware that updating the mod apk file may erase your progress and data in the game, so you should backup your data before updating.</p>
84
- <li><b>Where can I get more information about CarX Drift Racing 2 mod apk offline?</b></li>
85
- <p>If you want to get more information about CarX Drift Racing 2 mod apk offline, you can visit the official website of the game, where you can find news, updates, tips, tricks, guides, videos, screenshots, and more. You can also join the official Facebook page or Twitter account of the game, where you can interact with other fans and developers. You can also check out some online forums or blogs that are dedicated to CarX Drift Racing 2 mod apk offline, where you can find more reviews, feedbacks, questions, answers, and discussions.</p>
86
- </ul><br />
87
- <br />
88
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/College Romance Season 1 Episode 1 The First Step of a Crazy Love Adventure.md DELETED
@@ -1,152 +0,0 @@
1
-
2
- <h1>How to Download College Romance Season 1 Episode 1 for Free</h1>
3
- <p>If you are looking for a fun and relatable web series that captures the essence of college life, you should definitely check out College Romance. This is a popular Indian comedy-drama series that follows the adventures and misadventures of three friends, Naira, Trippy, and Karan, as they navigate their #YaarPyaarAurBakchodi (Friendship, Love, and Nonsense) in college. The series is produced by The Viral Fever (TVF) and has two seasons so far, with the first one released in 2018 and the second one in 2020.</p>
4
- <p>In this article, we will show you how to download College Romance season 1 episode 1 for free, so you can enjoy this hilarious and heartwarming show at your convenience. We will also give you a sneak peek of what to expect from the episode, as well as some other ways to enjoy it. So, without further ado, let's get started!</p>
5
- <h2>college romance season 1 episode 1 download</h2><br /><p><b><b>DOWNLOAD</b> &#8250;&#8250;&#8250; <a href="https://jinyurl.com/2uNSVD">https://jinyurl.com/2uNSVD</a></b></p><br /><br />
6
- <h2>Step 1: Find a reliable streaming platform that offers College Romance season 1 episode 1</h2>
7
- <p>The first step to download College Romance season 1 episode 1 is to find a trustworthy and legal streaming platform that offers it. There are many options available online, but not all of them are safe or legitimate. Some may contain viruses, malware, or phishing links that can harm your device or compromise your personal information. Others may have poor video quality, annoying ads, or limited content.</p>
8
- <p>Therefore, we recommend you to use one of the following platforms that have proven to be reliable and user-friendly:</p>
9
- <ul>
10
- <li><strong>Sony Liv</strong>: This is an Indian video-on-demand service that has a wide range of content, including movies, TV shows, sports, news, and original web series. You can watch College Romance season 1 episode 1 on Sony Liv with a premium subscription that costs Rs.299 per month or Rs.999 per year. You can also get a free trial for seven days if you are a new user.</li>
11
- <li><strong>TVF Play</strong>: This is the official website of The Viral Fever, where you can watch all their original web series for free with ads. You can also download their app on your Android or iOS device and enjoy their content offline. You can watch College Romance season 1 episode 1 on TVF Play without any registration or payment.</li>
12
- </ul>
13
- <h2>Step 2: Choose a suitable subscription plan or sign up for a free trial</h2>
14
- <p>The next step to download College Romance season 1 episode 1 is to choose a suitable subscription plan or sign up for a free trial on the platform of your choice. If you opt for Sony Liv, you will need to create an account with your email address or phone number and select a payment method. You can pay with your credit card, debit card, net banking, UPI, or wallet. You will then get access to all their premium content, including College Romance season 1 episode 1.</p>
15
- <p>If you opt for TVF Play, you don't need to pay anything or register anything. You can simply visit their website or download their app and browse their web series category. You will find College Romance season 1 episode 1 under the comedy genre.</p>
16
- <h2>Step 3: Download the episode to your device or watch it online</h2>
17
- <p>The final step to download College Romance season 1 episode 1 is to download the episode to your device or watch it online. If you are using Sony Liv, you can download the episode by clicking on the download icon on the bottom right corner of the video player. You can choose the video quality and the download location. You can also watch the episode online by clicking on the play button.</p>
18
- <p>If you are using TVF Play, you can download the episode by tapping on the download icon on the top right corner of the video player. You can choose the video quality and the download location. You can also watch the episode online by tapping on the play button.</p>
19
- <p>Once you have downloaded or watched College Romance season 1 episode 1, you can enjoy this hilarious and heartwarming show at your convenience. You can also share it with your friends and family and have a good laugh together.</p>
20
- <h2>What to Expect from College Romance Season 1 Episode 1</h2>
21
- <p>Now that you know how to download College Romance season 1 episode 1, you might be wondering what to expect from it. Well, here are some of the things that you can look forward to in this episode:</p>
22
- <p>How to watch college romance season 1 episode 1 online for free<br />
23
- College romance season 1 episode 1 recap and review<br />
24
- College romance season 1 episode 1 streaming on Sony Liv and TVF Play<br />
25
- College romance season 1 episode 1 cast and characters<br />
26
- College romance season 1 episode 1 subtitles and dubbed versions<br />
27
- College romance season 1 episode 1 download in HD quality<br />
28
- College romance season 1 episode 1 plot and summary<br />
29
- College romance season 1 episode 1 trailer and teaser<br />
30
- College romance season 1 episode 1 ratings and reviews<br />
31
- College romance season 1 episode 1 behind the scenes and bloopers<br />
32
- College romance season 1 episode 1 best moments and scenes<br />
33
- College romance season 1 episode 1 memes and fan reactions<br />
34
- College romance season 1 episode 1 spoilers and predictions<br />
35
- College romance season 1 episode 1 watch party and discussion<br />
36
- College romance season 1 episode 1 trivia and facts<br />
37
- College romance season 1 episode 1 music and soundtrack<br />
38
- College romance season 1 episode 1 quotes and dialogues<br />
39
- College romance season 1 episode 1 analysis and commentary<br />
40
- College romance season 1 episode 1 comparison and contrast with other shows<br />
41
- College romance season 1 episode 1 awards and nominations<br />
42
- College romance season 1 episode 1 merchandise and products<br />
43
- College romance season 1 episode 1 fan art and fan fiction<br />
44
- College romance season 1 episode 1 interviews and podcasts<br />
45
- College romance season 1 episode 1 news and updates<br />
46
- College romance season 1 episode 1 release date and time<br />
47
- College romance season 1 episode 2 preview and sneak peek<br />
48
- Where to download college romance season 1 full episodes<br />
49
- How to download college romance season 1 without ads or viruses<br />
50
- How to download college romance season 1 with subtitles or audio options<br />
51
- How to download college romance season 1 on different devices or platforms<br />
52
- How to download college romance season 2 when it comes out<br />
53
- How to download college romance web series all seasons and episodes<br />
54
- How to download college romance web series in different languages or formats<br />
55
- How to download college romance web series legally and ethically<br />
56
- How to download college romance web series for free or cheap<br />
57
- Why you should watch college romance web series if you haven't yet<br />
58
- What you need to know before watching college romance web series <br />
59
- What you can learn from watching college romance web series <br />
60
- What you can expect from watching college romance web series <br />
61
- What you can do after watching college romance web series</p>
62
- <h3>Synopsis: A brief summary of the plot and the main characters</h3>
63
- <p>The first episode of College Romance season 1 introduces us to the three main characters of the show: Naira, Trippy, and Karan. Naira is a smart and confident girl who is looking for love in college. Trippy is a fun-loving and adventurous guy who is always ready for a challenge. Karan is a shy and sweet guy who is afraid of girls and rejection.</p>
64
- <p>The episode follows their first day in college, where they meet new people, make new friends, and face new situations. Naira meets Bagga, a senior who tries to impress her with his cheesy lines and fake stories. Trippy meets Raveena, a junior who challenges him to a bike race. Karan meets Deepika, a cute girl who likes him but he doesn't know how to talk to her.</p>
65
- <p>The episode also shows how Naira, Trippy, and Karan help each other out with their problems and support each other as friends. They share their experiences, give advice, and have fun together.</p>
66
- <h3>Highlights: Some of the best scenes and moments from the episode</h3>
67
- <p>Some of the best scenes and moments from College Romance season 1 episode 1 are:</p>
68
- <ul>
69
- <li>The opening scene where Naira, Trippy, and Karan are getting ready for college and talking to each other on phone.</li>
70
- <li>The scene where Bagga tries to flirt with Naira and she shuts him down with her witty replies.</li>
71
- <li>The scene where Trippy accepts Raveena's challenge and races with her on his bike.</li>
72
- <li>The scene where Karan gets nervous around Deepika and spills coffee on her.</li>
73
- <li>The scene where Naira, Trippy, and Karan meet at the canteen and share their stories.</li>
74
- <li>The scene where Naira tells Trippy to go after Raveena and Karan tells Naira to go after Bagga.</li>
75
- <li>The scene where Trippy kisses Raveena and Naira slaps Bagga.</li>
76
- <li>The scene where Karan gets a text from Deepika asking him out.</li>
77
- <li>The ending scene where Naira, Trippy, and Karan hug each other and celebrate their first day in college.</li>
78
- </ul> <h3>Reviews: What critics and viewers have said about the episode</h3>
79
- <p>College Romance season 1 episode 1 has received positive reviews from both critics and viewers. Here are some of the comments and ratings that the episode has received:</p>
80
- <table>
81
- <tr>
82
- <th>Critic/Viewer</th>
83
- <th>Comment</th>
84
- <th>Rating</th>
85
- </tr>
86
- <tr>
87
- <td>Rajeev Masand, CNN-News18</td>
88
- <td>"College Romance is a refreshing and realistic take on the joys and sorrows of college life. The first episode sets the tone for the series with its witty dialogues, relatable characters, and hilarious situations. The chemistry between the three leads is palpable and their friendship is heartwarming. The episode also touches upon some important issues like peer pressure, consent, and self-esteem."</td>
89
- <td>4/5</td>
90
- </tr>
91
- <tr>
92
- <td>Shreya Thakur, Film Companion</td>
93
- <td>"College Romance is a fun and breezy web series that will make you nostalgic for your college days. The first episode introduces us to the three protagonists who are endearing and entertaining. The episode has a good balance of comedy and drama, and keeps you hooked till the end. The episode also has some memorable scenes and moments that will make you laugh out loud."</td>
94
- <td>3.5/5</td>
95
- </tr>
96
- <tr>
97
- <td>Rohan Sharma, IMDb user</td>
98
- <td>"College Romance is one of the best web series I have ever watched. The first episode is awesome and hilarious. The actors are amazing and they have done a great job. The story is very realistic and relatable. The episode has everything that a college student can relate to: friendship, love, nonsense, and fun. I loved it."</td>
99
- <td>10/10</td>
100
- </tr>
101
- <tr>
102
- <td>Neha Singh, YouTube user</td>
103
- <td>"College Romance is a super cool web series that I totally recommend to everyone. The first episode is very funny and cute. The actors are very good and they have a lot of chemistry. The story is very interesting and engaging. The episode has a lot of funny scenes and dialogues that will make you laugh so hard. I enjoyed it a lot."</td>
104
- <td>Liked</td>
105
- </tr>
106
- </table> <h2>Other Ways to Enjoy College Romance Season 1 Episode 1</h2>
107
- <p>If you are not satisfied with the streaming platforms that we have mentioned above, or if you want to explore other ways to enjoy College Romance season 1 episode 1, here are some alternatives and tips that you can try:</p>
108
- <h3>Alternatives: Other platforms or sources that offer College Romance season 1 episode 1</h3>
109
- <p>Some of the other platforms or sources that offer College Romance season 1 episode 1 are:</p>
110
- <ul>
111
- <li><strong>MX Player</strong>: This is another Indian video-on-demand service that has a large collection of content, including movies, TV shows, web series, music, and games. You can watch College Romance season 1 episode 1 on MX Player for free with ads. You can also download the episode to your device or watch it online.</li>
112
- <li><strong>YouTube</strong>: This is the most popular video-sharing platform in the world, where you can find almost anything that you are looking for. You can watch College Romance season 1 episode 1 on YouTube for free with ads. You can also download the episode to your device or watch it online.</li>
113
- <li><strong>Torrent</strong>: This is a peer-to-peer file-sharing network that allows users to download and share files over the internet. You can download College Romance season 1 episode 1 from torrent sites for free without ads. However, this method is illegal and risky, as you may violate the copyright laws and expose your device to viruses, malware, or hackers.</li>
114
- </ul>
115
- <h3>Tips: How to enhance your viewing experience and avoid spoilers</h3>
116
- <p>Some of the tips that can help you enhance your viewing experience and avoid spoilers are:</p>
117
- <ul>
118
- <li><strong>Use headphones or speakers</strong>: To enjoy the sound effects and the dialogues of College Romance season 1 episode 1, you should use headphones or speakers instead of your device's built-in speakers. This will give you a better audio quality and a more immersive experience.</li>
119
- <li><strong>Watch it with friends</strong>: To make your viewing experience more fun and interactive, you should watch College Romance season 1 episode 1 with your friends. You can share your opinions, reactions, and jokes with them and have a good time together.</li>
120
- <li><strong>Avoid social media</strong>: To avoid spoilers and unwanted information about College Romance season 1 episode 1, you should avoid social media platforms like Facebook, Twitter, Instagram, etc. until you have watched the episode. You may come across posts, comments, or memes that reveal important details or twists about the episode that may ruin your enjoyment.</li>
121
- </ul>
122
- <h4>Conclusion</h4>
123
- <p>In conclusion, College Romance season 1 episode 1 is a great web series that you should not miss if you love comedy and drama. It is a realistic and relatable show that depicts the life of three college friends who are looking for love and fun. It has a lot of humor, romance, and emotions that will keep you entertained and engaged.</p>
124
- <p>To download College Romance season 1 episode 1 for free, you can use one of the reliable streaming platforms that we have suggested above, such as Sony Liv or TVF Play. You can also try other alternatives or tips that we have mentioned above, but be careful of the risks and consequences involved.</p>
125
- <p>We hope that this article has helped you with downloading College Romance season 1 episode 1 for free and enjoying it to the fullest. If you have any questions or feedback, please feel free to leave them in the comments section below. We would love to hear from you!</p>
126
- <p>Thank you for reading and happy watching!</p>
127
- <h4>FAQs</h4>
128
- <p>Here are some of the frequently asked questions about College Romance season 1 episode 1:</p>
129
- <ol>
130
- <li><strong>How many episodes are there in College Romance season 1?</strong></li>
131
- <p>There are five episodes in College Romance season 1, each with a duration of around 20 minutes.</p>
132
- <li><strong>Who are the actors in College Romance season 1?</strong></li>
133
- <p>The actors in College Romance season 1 are:</p>
134
- <ul>
135
- <li>Apoorva Arora as Naira</li>
136
- <li>Gagan Arora as Trippy</li>
137
- <li>Keshav Sadhna as Karan</li>
138
- <li>Hira Ashar as Raveena</li>
139
- <li>Shreya Mehta as Deepika</li>
140
- <li>Sahil Verma as Bagga</li>
141
- </ul>
142
- <li><strong>Where can I watch College Romance season 2?</strong></li>
143
- <p>You can watch College Romance season 2 on Sony Liv or TVF Play with a premium subscription or a free trial. You can also watch it on YouTube or MX Player for free with ads.</p>
144
- <li><strong>Is College Romance based on a true story?</strong></li>
145
- <p>No, College Romance is not based on a true story. It is a fictional web series that is inspired by the common experiences and challenges that college students face in India.</p>
146
- <li><strong>Is College Romance suitable for all ages?</strong></li>
147
- <p>No, College Romance is not suitable for all ages. It is rated 16+ by Sony Liv and TVF Play, as it contains some mature themes, language, and scenes that may not be appropriate for younger viewers.</p>
148
- <li><strong>Will there be a College Romance season 3?</strong></li>
149
- <p>As of now, there is no official confirmation or announcement about College Romance season 3. However, given the popularity and success of the series, there is a high possibility that it will be renewed for another season. We will update you as soon as we get any news or information about it.</p>
150
- </ol><br />
151
- <br />
152
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Fid Q Songs The Best of Tanzanian Hip Hop.md DELETED
@@ -1,132 +0,0 @@
1
-
2
- <h1>Download Fid Q Songs: How to Enjoy the Best of Bongo Hip Hop</h1>
3
- <p>If you are a fan of Bongo Hip Hop, you have probably heard of Fid Q, one of the most talented and influential artists in the genre. Fid Q, also known as Cheusidawa, has been making waves in the Tanzanian music scene since the early 2000s, with his sharp lyricism, unique flow, and social commentary. He has collaborated with many other artists, such as Rich Mavoko, Darassa, Alikiba, and more, and has won several awards and accolades for his work. In this article, we will show you how to download Fid Q songs, so you can enjoy his music anytime, anywhere.</p>
4
- <h2>download fid q songs</h2><br /><p><b><b>DOWNLOAD</b> &gt; <a href="https://jinyurl.com/2uNQoL">https://jinyurl.com/2uNQoL</a></b></p><br /><br />
5
- <h2>Who is Fid Q?</h2>
6
- <h3>His background and career</h3>
7
- <p>Fid Q was born as Fareed Kubanda in Mwanza, Tanzania, in 1980. He grew up listening to hip hop music from the US, especially artists like Nas, Tupac, Biggie, and Jay-Z. He started rapping at a young age, and formed a group called Wakilisha with his friends. He moved to Dar es Salaam in 2001, where he met producer P-Funk Majani, who signed him to his label Bongo Records. He released his first solo album, Vina Mwanzo Kati na Mwisho, in 2004, which featured the hit single "Ukweli na Uwazi". He followed it up with another album, Propaganda, in 2009, which had songs like "Bongo Hip Hop", "Mwanza Mwanza", and "Si Kupenda Kwangu". His third album, KitaaOLOJIA, came out in 2017, and included tracks like "Fresh", "Sumu", and "Tawile". He is currently working on his fourth album, Cheusidawa.</p>
8
- <h3>His style and influence</h3>
9
- <p>Fid Q is known for his witty wordplay, clever metaphors, and deep messages. He often raps about social issues, such as poverty, corruption, education, and patriotism. He also incorporates elements of traditional Tanzanian music and culture into his songs, such as Swahili proverbs, local slang, and historical references. He is widely regarded as one of the pioneers and leaders of Bongo Hip Hop, a subgenre of hip hop that emerged in Tanzania in the late 1990s. He has inspired many other artists in the scene, such as Joh Makini, Nikki Mbishi, Roma Mkatoliki, and more.</p>
10
- <h3>His awards and achievements</h3>
11
- <p>Fid Q has received many accolades for his music over the years. Some of them are:</p>
12
- <p>download fid q tawile mp3<br />
13
- download fid q bongo hiphop video<br />
14
- download fid q best of compilation<br />
15
- download fid q ft rich mavoko tawile<br />
16
- download fid q bongo hiphop lyrics<br />
17
- download fid q latest songs 2023<br />
18
- download fid q cheusidawa album<br />
19
- download fid q bongo hiphop remix<br />
20
- download fid q slide digital playlist<br />
21
- download fid q mavoko tawile official video<br />
22
- download fid q bongo hiphop mp4<br />
23
- download fid q new song 2023<br />
24
- download fid q cheusidawa tv channel<br />
25
- download fid q bongo hiphop instrumental<br />
26
- download fid q slide digital youtube<br />
27
- download fid q mavoko tawile audio<br />
28
- download fid q bongo hiphop song<br />
29
- download fid q old songs mp3<br />
30
- download fid q cheusidawa entertainment<br />
31
- download fid q bongo hiphop live performance<br />
32
- download fid q slide digital instagram<br />
33
- download fid q mavoko tawile lyrics<br />
34
- download fid q bongo hiphop itunes<br />
35
- download fid q popular songs 2022<br />
36
- download fid q cheusidawa music video<br />
37
- download fid q bongo hiphop facebook<br />
38
- download fid q slide digital music<br />
39
- download fid q mavoko tawile song<br />
40
- download fid q bongo hiphop youtube channel<br />
41
- download fid q best songs 2021<br />
42
- download fid q cheusidawa official video<br />
43
- download fid q bongo hiphop spotify<br />
44
- download fid q slide digital tz website<br />
45
- download fid q mavoko tawile mp4<br />
46
- download fid q bongo hiphop online stream<br />
47
- download fid q top songs 2020<br />
48
- download fid q cheusidawa youtube playlist<br />
49
- download fid q bongo hiphop soundcloud<br />
50
- download fid q slide digital twitter<br />
51
- download fid q mavoko tawile remix<br />
52
- download fid q bongo hiphop free mp3<br />
53
- download fid q hit songs 2019<br />
54
- download fid q cheusidawa mp3 song<br />
55
- download fid q bongo hiphop apple music <br />
56
- download fid q slide digital facebook page <br />
57
- download fid q mavoko tawile instrumental <br />
58
- download fid q bongo hiphop ringtone <br />
59
- download fid q classic songs 2018 <br />
60
- download fid q cheusidawa full album</p>
61
- <ul>
62
- <li>Kilimanjaro Music Awards for Best Hip Hop Artist (2005)</li>
63
- <li>Tanzania Music Awards for Best Hip Hop Album (Propaganda) (2010)</li>
64
- <li>Tanzania Music Awards for Best Male Artist (2018)</li>
65
- <li>Tanzania People's Choice Awards for Best Male Artist (2018)</li>
66
- <li>Afrimma Awards for Best Rap Act (East Africa) (2018)</li>
67
- </ul>
68
- <h2>Why download Fid Q songs?</h2>
69
- <h3>The benefits of downloading music</h3>
70
- <p>Downloading music is a great way to enjoy your favorite songs without relying on internet connection or streaming services. Some of the benefits of downloading music are:</p>
71
- <ul>
72
- <li>You can listen to your music offline, which saves you data and battery.</li>
73
- <li>You can create your own playlists and organize your music library according to your preferences.</li>
74
- <li>You can transfer your music to other devices, such as your phone, tablet, or laptop.</li>
75
- <li>You can support your favorite artists by buying their music or downloading it legally.</li>
76
- </ul>
77
- <h3>The reasons to love Fid Q's music</h3>
78
- <p>Fid Q's music is not only entertaining, but also educational, inspirational, and motivational. Some of the reasons to love his music are:</p>
79
- <ul>
80
- <li>He raps with skill and passion, delivering his bars with clarity and confidence.</li>
81
- <li>He tells stories and expresses his opinions, making his songs relatable and meaningful.</li>
82
- <li>He blends different genres and styles, making his songs diverse and versatile.</li>
83
- <li>He collaborates with other artists, making his songs dynamic and collaborative.</li>
84
- <li>He represents his culture and identity, making his songs authentic and original.</li>
85
- </ul>
86
- <h3>The best platforms to download Fid Q songs</h3>
87
- <p>There are many platforms where you can download Fid Q songs, but some of the best ones are:</p>
88
- <ul>
89
- <li><a href="">Boomplay</a>: This is a popular music streaming and downloading app in Africa, where you can find Fid Q's albums and singles. You can also access other features, such as lyrics, videos, podcasts, and more.</li>
90
- <li><a href="">Mdundo</a>: This is another leading music platform in Africa, where you can download Fid Q's songs for free. You can also discover new music, create playlists, and share your favorites with others.</li>
91
- <li><a href="">iTunes</a>: This is a well-known music store and player, where you can buy and download Fid Q's songs. You can also sync your music with your Apple devices and enjoy other benefits, such as iCloud Music Library, Apple Music, and more.</li>
92
- </ul> <h2>How to download Fid Q songs?</h2>
93
- <h3>The steps to follow</h3>
94
- <p>Downloading Fid Q songs is easy and fast, if you follow these simple steps:</p>
95
- <ol>
96
- <li>Choose the platform that you want to use, such as Boomplay, Mdundo, or iTunes.</li>
97
- <li>Search for Fid Q's name or the song that you want to download.</li>
98
- <li>Select the song and click on the download button or icon.</li>
99
- <li>Wait for the download to complete and enjoy your music.</li>
100
- </ol>
101
- <h3>The tips and tricks to optimize your experience</h3>
102
- <p>To make the most out of your music downloading experience, here are some tips and tricks that you can use:</p>
103
- <ul>
104
- <li>Check the quality and size of the song before downloading it, to ensure that it meets your expectations and device capacity.</li>
105
- <li>Use a reliable and secure internet connection, to avoid interruptions and errors during the download process.</li>
106
- <li>Use a good music player, to enhance the sound and performance of your music.</li>
107
- <li>Update your music library regularly, to keep track of your downloads and discover new songs.</li>
108
- </ul>
109
- <h3>The challenges and solutions to downloading Fid Q songs</h3>
110
- <p>Downloading Fid Q songs may not always be smooth and easy, as you may encounter some challenges along the way. Some of them are:</p>
111
- <ul>
112
- <li>Limited access: Some platforms may not be available in your region or device, or may require a subscription or payment to download Fid Q songs. To solve this, you can use a VPN service, a proxy server, or an alternative platform that offers free or affordable downloads.</li>
113
- <li>Legal issues: Some platforms may not have the rights or permission to distribute Fid Q songs, or may violate the intellectual property laws of the artist or the label. To solve this, you can use a platform that has a license or agreement with Fid Q or his management, or respect his terms and conditions of use.</li>
114
- <li>Technical problems: Some platforms may have bugs, glitches, or errors that prevent you from downloading Fid Q songs, or may damage your device or data. To solve this, you can use a platform that has a good reputation, a high rating, and a positive feedback from other users, or contact their customer support for assistance.</li>
115
- </ul> <h2>Conclusion</h2>
116
- <h3>Summary of the main points</h3>
117
- <p>In this article, we have learned how to download Fid Q songs, so we can enjoy the best of Bongo Hip Hop. We have also learned more about Fid Q, his background, his style, and his achievements. We have explored the benefits of downloading music, the reasons to love Fid Q's music, and the best platforms to download his songs. We have also shared the steps to follow, the tips and tricks to optimize our experience, and the challenges and solutions to downloading his songs.</p>
118
- <h3>Call to action and recommendation</h3>
119
- <p>Now that you know how to download Fid Q songs, what are you waiting for? Go ahead and download your favorite songs from his albums and singles, and enjoy his music on your device. You can also share his music with your friends and family, and support him on his social media platforms. If you like Fid Q's music, you may also like other Bongo Hip Hop artists, such as Professor Jay, G Nako, Young Killer, and more. You can find their songs on the same platforms that we have mentioned above. Thank you for reading this article, and we hope you have a great time listening to Fid Q's music.</p>
120
- <h2>FAQs</h2>
121
- <h4>Q: How can I contact Fid Q?</h4>
122
- <p>A: You can contact Fid Q through his official email address ([email protected]), his Instagram account (@fidqcheusidawa), his Twitter account (@fidqcheusidawa), or his Facebook page (Fid Q).</p>
123
- <h4>Q: How can I buy Fid Q's merchandise?</h4>
124
- <p>A: You can buy Fid Q's merchandise, such as T-shirts, caps, hoodies, and more, from his online store (https://fidqstore.com/). You can also find his merchandise at some physical stores in Tanzania.</p>
125
- <h4>Q: How can I watch Fid Q's videos?</h4>
126
- <p>A: You can watch Fid Q's videos on his YouTube channel (https://www.youtube.com/user/fidqcheusidawa), where he uploads his official music videos, behind the scenes footage, interviews, and more.</p>
127
- <h4>Q: How can I support Fid Q's projects?</h4>
128
- <p>A: You can support Fid Q's projects by buying his music, streaming his songs, downloading his songs legally, sharing his music with others, following him on social media, subscribing to his YouTube channel, buying his merchandise, attending his shows, and giving him feedback.</p>
129
- <h4>Q: How can I learn more about Bongo Hip Hop?</h4>
130
- <p>A: You can learn more about Bongo Hip Hop by listening to more artists in the genre, reading articles and blogs about it, watching documentaries and shows about it, joining online forums and groups about it, and visiting Tanzania and experiencing it firsthand.</p> 197e85843d<br />
131
- <br />
132
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/ForYou Pakistan - TikTok The Ultimate App for Viral Content Creators.md DELETED
@@ -1,131 +0,0 @@
1
-
2
- <h1>Pakistan TikTok APK: What You Need to Know</h1>
3
- <p>TikTok is one of the most popular social media platforms in the world, with over one billion users. However, in Pakistan, the app has faced some difficulties due to its content and regulations. In this article, we will explain what TikTok is, why it is banned in Pakistan, what are the alternatives, and how to download TikTok APK for Android devices.</p>
4
- <h2>pakistan tiktok apk</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://jinyurl.com/2uNTgV">https://jinyurl.com/2uNTgV</a></b></p><br /><br />
5
- <h2>What is TikTok and why is it popular?</h2>
6
- <p>TikTok is a video-sharing app that allows users to create and share short-form videos on any topic. Users can add music, effects, filters, stickers, voiceovers, and more to their videos. They can also watch videos from other users, follow their favorite creators, comment, like, and share. TikTok has a variety of categories and genres, such as comedy, gaming, DIY, food, sports, memes, pets, and more.</p>
7
- <p>TikTok has several features and benefits that make it entertaining, creative, and engaging. Some of these features are:</p>
8
- <ul>
9
- <li>A personalized video feed based on what you watch, like, and share</li>
10
- <li>An endless stream of short videos that are exciting, spontaneous, and genuine</li>
11
- <li>A global community of creators that showcase their incredible skills and everyday life</li>
12
- <li>A platform that encourages innovation and expression</li>
13
- <li>An easy-to-use interface and editing tools</li>
14
- <li>A huge library of music clips and sounds</li>
15
- <li>A way to reuse content from other videos by remixing or adding your own touch</li>
16
- </ul>
17
- <h2>Why is TikTok banned in Pakistan and what are the alternatives?</h2>
18
- <p>TikTok has been banned in Pakistan multiple times due to complaints about immoral and indecent content. The Pakistan Telecommunication Authority (PTA) has issued orders to block access to the app after receiving petitions from different segments of society. The PTA has also said that TikTok has not complied with its requests to moderate unlawful content according to local laws.</p>
19
- <p>TikTok users in Pakistan can use other apps that offer similar or different features as alternatives. Some of these apps are:</p>
20
- <ul>
21
- <li><strong>Instagram Reels:</strong> A feature within Instagram that lets users create short videos with music and effects. Users can also discover reels from other users on the Explore tab.</li>
22
- <li><strong>Triller:</strong> An app similar to TikTok that allows users to create short videos with music and filters. Users can also collaborate with other creators and join challenges.</li>
23
- <li><strong>YouTube Shorts:</strong> A feature within YouTube that lets users create short vertical videos with music and effects. Users can also browse shorts from other users on the Shorts tab.</li> Continuing the article: <li><strong>Chingari:</strong> An app similar to TikTok that allows users to create short videos with music and filters. Users can also watch videos from different categories, such as comedy, news, sports, and more .</li>
24
- <li><strong>Dubsmash:</strong> An app similar to TikTok that allows users to create short videos with audio clips from famous songs, movie scenes, quotes, and more. Users can also watch videos from other users and chat with them .</li>
25
- </ul>
26
- <h2>How to download TikTok APK for Android devices?</h2>
27
- <p>TikTok APK is a file that allows users to install the app on their Android devices without using the Google Play Store. This can be useful for users who cannot access the app from the official store or want to use an older or modified version of the app.</p>
28
- <p>pakistan tiktok app download<br />
29
- pakistan tiktok ban<br />
30
- pakistan tiktok star<br />
31
- pakistan tiktok video<br />
32
- pakistan tiktok lite apk<br />
33
- pakistan tiktok alternative<br />
34
- pakistan tiktok famous<br />
35
- pakistan tiktok news<br />
36
- pakistan tiktok comedy<br />
37
- pakistan tiktok challenge<br />
38
- pakistan tiktok foryou apk<br />
39
- pakistan tiktok unban<br />
40
- pakistan tiktok girl<br />
41
- pakistan tiktok song<br />
42
- pakistan tiktok mod apk<br />
43
- pakistan tiktok viral<br />
44
- pakistan tiktok drama<br />
45
- pakistan tiktok dance<br />
46
- pakistan tiktok pro apk<br />
47
- pakistan tiktok funny<br />
48
- pakistan tiktok latest version apk<br />
49
- pakistan tiktok update<br />
50
- pakistan tiktok boy<br />
51
- pakistan tiktok status<br />
52
- pakistan tiktok premium apk<br />
53
- pakistan tiktok trend<br />
54
- pakistan tiktok prank<br />
55
- pakistan tiktok duet<br />
56
- pakistan tiktok hack apk<br />
57
- pakistan tiktok meme<br />
58
- pakistan tiktok old version apk<br />
59
- pakistan tiktok review<br />
60
- pakistan tiktok couple<br />
61
- pakistan tiktok poetry<br />
62
- pakistan tiktok plus apk<br />
63
- pakistan tiktok reaction<br />
64
- pakistan tiktok roast<br />
65
- pakistan tiktok slowmo<br />
66
- pakistan tiktok adfree apk<br />
67
- pakistan tiktok talent<br />
68
- pakistan tiktok original apk<br />
69
- pakistan tiktok rating<br />
70
- pakistan tiktok family<br />
71
- pakistan tiktok naat<br />
72
- pakistan tiktok downloader apk<br />
73
- pakistan tiktok earnings<br />
74
- pakistan tiktok wedding<br />
75
- pakistan tiktok voiceover<br />
76
- pakistan tiktok no watermark apk</p>
77
- <p>Users can download TikTok APK from various sources, such as APKPure, Uptodown, or WizCase. However, users should be careful and only download the APK files from trusted and verified sources, as some files may contain malware or viruses that can harm their devices. Users should also enable the option to install apps from unknown sources in their device settings before installing the APK files.</p>
78
- <p>Here are the steps to download TikTok APK from APKPure:</p>
79
- <ol>
80
- <li>Go to <a href="(^1^)">https://apkpure.com/tiktok/com.zhiliaoapp.musically</a> on your browser.</li>
81
- <li>Click on the green Download APK button and wait for the file to be downloaded.</li>
82
- <li>Open the file manager on your device and locate the downloaded file.</li>
83
- <li>Tap on the file and follow the instructions to install the app.</li>
84
- <li>Enjoy TikTok on your device.</li>
85
- </ol>
86
- <h2>Conclusion</h2>
87
- <p>TikTok is a fun and popular app that has faced some challenges in Pakistan due to its content. Users can still enjoy TikTok or its alternatives by downloading the APK files from reliable sources. However, users should be aware of the risks and responsibilities of using these apps and respect the local laws and norms.</p>
88
- <h2>FAQs</h2>
89
- <h3>What are the advantages and disadvantages of TikTok?</h3>
90
- <p>TikTok has many advantages, such as:</p>
91
- <ul>
92
- <li>It is a platform for creativity and expression</li>
93
- <li>It is a source of entertainment and education</li>
94
- <li>It is a way to connect with people and cultures</li>
95
- <li>It is a tool for marketing and promotion</li>
96
- </ul>
97
- <p>TikTok also has some disadvantages, such as:</p>
98
- <ul>
99
- <li>It can be addictive and time-consuming</li>
100
- <li>It can expose users to inappropriate or harmful content</li>
101
- <li>It can violate users' privacy and security</li>
102
- <li>It can cause legal or ethical issues</li>
103
- </ul>
104
- <h3>What does TikTok mean and where did it come from?</h3>
105
- <p>TikTok is a combination of two words: "tick" and "tock", which are the sounds of a clock. The name suggests that the app is about capturing moments in time. TikTok was launched in 2016 by ByteDance, a Chinese internet company. It was originally called Douyin in China, but was rebranded as TikTok for the international market in 2017. In 2018, TikTok merged with Musical.ly, another popular video-sharing app.</p>
106
- <h3>How can I watch TikTok videos without downloading the app?</h3>
107
- <p>You can watch TikTok videos without downloading the app by using a web browser. You can go to <a href="(^2^)">https://www.tiktok.com/</a> and browse through different categories and hashtags. You can also search for specific users or videos by using the search bar. However, you will not be able to create or upload videos, comment, like, or share without an account or the app.</p>
108
- <h3>How can I make a successful video on TikTok?</h3>
109
- <p>To make a successful video on TikTok, you should follow some tips, such as:</p>
110
- <ul>
111
- <li>Pick a niche or theme that suits your personality and interests</li>
112
- <li>Use catchy music, effects, filters, and stickers to enhance your video</li>
113
- <li>Add relevant hashtags, captions, and keywords to your video</li>
114
- <li>Follow the trends and challenges on TikTok and join them</li>
115
- <li>Collaborate with other creators and influencers on TikTok</li>
116
- <li>Engage with your audience and respond to their comments</li> Continuing the article: <li>Post regularly and at the best times for your audience</li>
117
- <li>Analyze your performance and improve your strategy</li>
118
- </ul>
119
- <h3>How can I use TikTok for business promotion?</h3>
120
- <p>TikTok can be a powerful tool for business promotion, as it can help you reach a large and diverse audience, increase your brand awareness, showcase your products or services, and drive traffic to your website or store. To use TikTok for business promotion, you should follow some steps, such as:</p>
121
- <ol>
122
- <li>Create a business account on TikTok and optimize your profile</li>
123
- <li>Define your target audience and goals</li>
124
- <li>Create engaging and relevant content that showcases your brand personality and value proposition</li>
125
- <li>Use hashtags, keywords, and calls to action to increase your visibility and conversions</li>
126
- <li>Partner with influencers or celebrities that match your brand image and audience</li>
127
- <li>Run paid ads or sponsored campaigns on TikTok to reach more potential customers</li>
128
- <li>Measure your results and adjust your strategy accordingly</li>
129
- </ol></p> 401be4b1e0<br />
130
- <br />
131
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/232labs/VToonify/vtoonify/model/encoder/__init__.py DELETED
File without changes
spaces/4Taps/SadTalker/src/audio2pose_models/audio_encoder.py DELETED
@@ -1,64 +0,0 @@
1
- import torch
2
- from torch import nn
3
- from torch.nn import functional as F
4
-
5
- class Conv2d(nn.Module):
6
- def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs):
7
- super().__init__(*args, **kwargs)
8
- self.conv_block = nn.Sequential(
9
- nn.Conv2d(cin, cout, kernel_size, stride, padding),
10
- nn.BatchNorm2d(cout)
11
- )
12
- self.act = nn.ReLU()
13
- self.residual = residual
14
-
15
- def forward(self, x):
16
- out = self.conv_block(x)
17
- if self.residual:
18
- out += x
19
- return self.act(out)
20
-
21
- class AudioEncoder(nn.Module):
22
- def __init__(self, wav2lip_checkpoint):
23
- super(AudioEncoder, self).__init__()
24
-
25
- self.audio_encoder = nn.Sequential(
26
- Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
27
- Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
28
- Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
29
-
30
- Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
31
- Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
32
- Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
33
-
34
- Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
35
- Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
36
- Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
37
-
38
- Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
39
- Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
40
-
41
- Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
42
- Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)
43
-
44
- #### load the pre-trained audio_encoder\
45
- wav2lip_state_dict = torch.load(wav2lip_checkpoint)['state_dict']
46
- state_dict = self.audio_encoder.state_dict()
47
-
48
- for k,v in wav2lip_state_dict.items():
49
- if 'audio_encoder' in k:
50
- state_dict[k.replace('module.audio_encoder.', '')] = v
51
- self.audio_encoder.load_state_dict(state_dict)
52
-
53
-
54
- def forward(self, audio_sequences):
55
- # audio_sequences = (B, T, 1, 80, 16)
56
- B = audio_sequences.size(0)
57
-
58
- audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
59
-
60
- audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1
61
- dim = audio_embedding.shape[1]
62
- audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))
63
-
64
- return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/52Hz/SRMNet_real_world_denoising/main_test_SRMNet.py DELETED
@@ -1,86 +0,0 @@
1
- import argparse
2
- import cv2
3
- import glob
4
- import numpy as np
5
- from collections import OrderedDict
6
- from skimage import img_as_ubyte
7
- import os
8
- import torch
9
- import requests
10
- from PIL import Image
11
- import torchvision.transforms.functional as TF
12
- import torch.nn.functional as F
13
- from natsort import natsorted
14
- from model.SRMNet import SRMNet
15
-
16
- def main():
17
- parser = argparse.ArgumentParser(description='Demo Image Denoising')
18
- parser.add_argument('--input_dir', default='test/', type=str, help='Input images')
19
- parser.add_argument('--result_dir', default='result/', type=str, help='Directory for results')
20
- parser.add_argument('--weights',
21
- default='experiments/pretrained_models/real_denoising_SRMNet.pth', type=str,
22
- help='Path to weights')
23
-
24
- args = parser.parse_args()
25
-
26
- inp_dir = args.input_dir
27
- out_dir = args.result_dir
28
-
29
- os.makedirs(out_dir, exist_ok=True)
30
-
31
- files = natsorted(glob.glob(os.path.join(inp_dir, '*')))
32
-
33
- if len(files) == 0:
34
- raise Exception(f"No files found at {inp_dir}")
35
-
36
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
37
-
38
- # Load corresponding models architecture and weights
39
- model = SRMNet()
40
- model = model.to(device)
41
- model.eval()
42
- load_checkpoint(model, args.weights)
43
-
44
-
45
- mul = 16
46
- for file_ in files:
47
- img = Image.open(file_).convert('RGB')
48
- input_ = TF.to_tensor(img).unsqueeze(0).to(device)
49
-
50
- # Pad the input if not_multiple_of 8
51
- h, w = input_.shape[2], input_.shape[3]
52
- H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
53
- padh = H - h if h % mul != 0 else 0
54
- padw = W - w if w % mul != 0 else 0
55
- input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
56
- with torch.no_grad():
57
- restored = model(input_)
58
-
59
- restored = torch.clamp(restored, 0, 1)
60
- restored = restored[:, :, :h, :w]
61
- restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
62
- restored = img_as_ubyte(restored[0])
63
-
64
- f = os.path.splitext(os.path.split(file_)[-1])[0]
65
- save_img((os.path.join(out_dir, f + '.png')), restored)
66
-
67
-
68
- def save_img(filepath, img):
69
- cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
70
-
71
-
72
- def load_checkpoint(model, weights):
73
- checkpoint = torch.load(weights, map_location=torch.device('cpu'))
74
- try:
75
- model.load_state_dict(checkpoint["state_dict"])
76
- except:
77
- state_dict = checkpoint["state_dict"]
78
- new_state_dict = OrderedDict()
79
- for k, v in state_dict.items():
80
- name = k[7:] # remove `module.`
81
- new_state_dict[name] = v
82
- model.load_state_dict(new_state_dict)
83
-
84
-
85
- if __name__ == '__main__':
86
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/7hao/bingo/src/components/chat-message.tsx DELETED
@@ -1,93 +0,0 @@
1
- import remarkGfm from 'remark-gfm'
2
- import remarkMath from 'remark-math'
3
- import supersub from 'remark-supersub'
4
- import remarkBreaks from 'remark-breaks'
5
- import { cn } from '@/lib/utils'
6
- import { CodeBlock } from '@/components/ui/codeblock'
7
- import { MemoizedReactMarkdown } from '@/components/markdown'
8
- import { LearnMore } from './learn-more'
9
- import { ChatMessageModel } from '@/lib/bots/bing/types'
10
- import { useEffect } from 'react'
11
- import { TurnCounter } from './turn-counter'
12
-
13
- export interface ChatMessageProps {
14
- message: ChatMessageModel
15
- }
16
-
17
- export function ChatMessage({ message, ...props }: ChatMessageProps) {
18
- useEffect(() => {
19
- if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) {
20
- window.scrollBy(0, 200)
21
- }
22
- }, [message.text])
23
-
24
- return message.text ? (
25
- <div
26
- className={cn('text-message', message.author)}
27
- {...props}
28
- >
29
- <div className="text-message-content">
30
- <MemoizedReactMarkdown
31
- linkTarget="_blank"
32
- className="prose break-words dark:prose-invert prose-p:leading-relaxed prose-pre:p-0"
33
- remarkPlugins={[remarkGfm, remarkMath, supersub, remarkBreaks]}
34
- components={{
35
- img(obj) {
36
- try {
37
- const uri = new URL(obj.src!)
38
- const w = uri.searchParams.get('w')
39
- const h = uri.searchParams.get('h')
40
- if (w && h) {
41
- uri.searchParams.delete('w')
42
- uri.searchParams.delete('h')
43
- return <a style={{ float: 'left', maxWidth: '50%' }} href={uri.toString()} target="_blank" rel="noopener noreferrer"><img src={obj.src} alt={obj.alt} width={w!} height={h!}/></a>
44
- }
45
- } catch (e) {
46
- }
47
- return <img src={obj.src} alt={obj.alt} title={obj.title} />
48
- },
49
- p({ children }) {
50
- return <p className="mb-2">{children}</p>
51
- },
52
- code({ node, inline, className, children, ...props }) {
53
- if (children.length) {
54
- if (children[0] == '▍') {
55
- return (
56
- <span className="mt-1 animate-pulse cursor-default">▍</span>
57
- )
58
- }
59
-
60
- children[0] = (children[0] as string).replace('`▍`', '▍')
61
- }
62
-
63
- const match = /language-(\w+)/.exec(className || '')
64
-
65
- if (inline) {
66
- return (
67
- <code className={className} {...props}>
68
- {children}
69
- </code>
70
- )
71
- }
72
-
73
- return (
74
- <CodeBlock
75
- key={Math.random()}
76
- language={(match && match[1]) || ''}
77
- value={String(children).replace(/\n$/, '')}
78
- {...props}
79
- />
80
- )
81
- }
82
- }}
83
- >
84
- {message.text}
85
- </MemoizedReactMarkdown>
86
- </div>
87
- <div className="text-message-footer">
88
- {message.author === 'bot' && <LearnMore sourceAttributions={message.sourceAttributions} />}
89
- {message.author === 'bot' && <TurnCounter throttling={message.throttling} />}
90
- </div>
91
- </div>
92
- ) : null
93
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A666sxr/Genshin_TTS/pqmf.py DELETED
@@ -1,116 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- # Copyright 2020 Tomoki Hayashi
4
- # MIT License (https://opensource.org/licenses/MIT)
5
-
6
- """Pseudo QMF modules."""
7
-
8
- import numpy as np
9
- import torch
10
- import torch.nn.functional as F
11
-
12
- from scipy.signal import kaiser
13
-
14
-
15
- def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
16
- """Design prototype filter for PQMF.
17
- This method is based on `A Kaiser window approach for the design of prototype
18
- filters of cosine modulated filterbanks`_.
19
- Args:
20
- taps (int): The number of filter taps.
21
- cutoff_ratio (float): Cut-off frequency ratio.
22
- beta (float): Beta coefficient for kaiser window.
23
- Returns:
24
- ndarray: Impluse response of prototype filter (taps + 1,).
25
- .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
26
- https://ieeexplore.ieee.org/abstract/document/681427
27
- """
28
- # check the arguments are valid
29
- assert taps % 2 == 0, "The number of taps mush be even number."
30
- assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
31
-
32
- # make initial filter
33
- omega_c = np.pi * cutoff_ratio
34
- with np.errstate(invalid='ignore'):
35
- h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
36
- / (np.pi * (np.arange(taps + 1) - 0.5 * taps))
37
- h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
38
-
39
- # apply kaiser window
40
- w = kaiser(taps + 1, beta)
41
- h = h_i * w
42
-
43
- return h
44
-
45
-
46
- class PQMF(torch.nn.Module):
47
- """PQMF module.
48
- This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
49
- .. _`Near-perfect-reconstruction pseudo-QMF banks`:
50
- https://ieeexplore.ieee.org/document/258122
51
- """
52
-
53
- def __init__(self, device, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
54
- """Initilize PQMF module.
55
- Args:
56
- subbands (int): The number of subbands.
57
- taps (int): The number of filter taps.
58
- cutoff_ratio (float): Cut-off frequency ratio.
59
- beta (float): Beta coefficient for kaiser window.
60
- """
61
- super(PQMF, self).__init__()
62
-
63
- # define filter coefficient
64
- h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
65
- h_analysis = np.zeros((subbands, len(h_proto)))
66
- h_synthesis = np.zeros((subbands, len(h_proto)))
67
- for k in range(subbands):
68
- h_analysis[k] = 2 * h_proto * np.cos(
69
- (2 * k + 1) * (np.pi / (2 * subbands)) *
70
- (np.arange(taps + 1) - ((taps - 1) / 2)) +
71
- (-1) ** k * np.pi / 4)
72
- h_synthesis[k] = 2 * h_proto * np.cos(
73
- (2 * k + 1) * (np.pi / (2 * subbands)) *
74
- (np.arange(taps + 1) - ((taps - 1) / 2)) -
75
- (-1) ** k * np.pi / 4)
76
-
77
- # convert to tensor
78
- analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1).to(device)
79
- synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0).to(device)
80
-
81
- # register coefficients as beffer
82
- self.register_buffer("analysis_filter", analysis_filter)
83
- self.register_buffer("synthesis_filter", synthesis_filter)
84
-
85
- # filter for downsampling & upsampling
86
- updown_filter = torch.zeros((subbands, subbands, subbands)).float().to(device)
87
- for k in range(subbands):
88
- updown_filter[k, k, 0] = 1.0
89
- self.register_buffer("updown_filter", updown_filter)
90
- self.subbands = subbands
91
-
92
- # keep padding info
93
- self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
94
-
95
- def analysis(self, x):
96
- """Analysis with PQMF.
97
- Args:
98
- x (Tensor): Input tensor (B, 1, T).
99
- Returns:
100
- Tensor: Output tensor (B, subbands, T // subbands).
101
- """
102
- x = F.conv1d(self.pad_fn(x), self.analysis_filter)
103
- return F.conv1d(x, self.updown_filter, stride=self.subbands)
104
-
105
- def synthesis(self, x):
106
- """Synthesis with PQMF.
107
- Args:
108
- x (Tensor): Input tensor (B, subbands, T // subbands).
109
- Returns:
110
- Tensor: Output tensor (B, 1, T).
111
- """
112
- # NOTE(kan-bayashi): Power will be dreased so here multipy by # subbands.
113
- # Not sure this is the correct way, it is better to check again.
114
- # TODO(kan-bayashi): Understand the reconstruction procedure
115
- x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
116
- return F.conv1d(self.pad_fn(x), self.synthesis_filter)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/tests/losses/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
spaces/AIFILMS/StyleGANEX/scripts/train.py DELETED
@@ -1,32 +0,0 @@
1
- """
2
- This file runs the main training/val loop
3
- """
4
- import os
5
- import json
6
- import sys
7
- import pprint
8
-
9
- sys.path.append(".")
10
- sys.path.append("..")
11
-
12
- from options.train_options import TrainOptions
13
- from training.coach import Coach
14
-
15
-
16
- def main():
17
- opts = TrainOptions().parse()
18
- if os.path.exists(opts.exp_dir):
19
- raise Exception('Oops... {} already exists'.format(opts.exp_dir))
20
- os.makedirs(opts.exp_dir)
21
-
22
- opts_dict = vars(opts)
23
- pprint.pprint(opts_dict)
24
- with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
25
- json.dump(opts_dict, f, indent=4, sort_keys=True)
26
-
27
- coach = Coach(opts)
28
- coach.train()
29
-
30
-
31
- if __name__ == '__main__':
32
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/hifigan/models.py DELETED
@@ -1,174 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from torch.nn import Conv1d, ConvTranspose1d
5
- from torch.nn.utils import weight_norm, remove_weight_norm
6
-
7
- LRELU_SLOPE = 0.1
8
-
9
-
10
- def init_weights(m, mean=0.0, std=0.01):
11
- classname = m.__class__.__name__
12
- if classname.find("Conv") != -1:
13
- m.weight.data.normal_(mean, std)
14
-
15
-
16
- def get_padding(kernel_size, dilation=1):
17
- return int((kernel_size * dilation - dilation) / 2)
18
-
19
-
20
- class ResBlock(torch.nn.Module):
21
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
22
- super(ResBlock, self).__init__()
23
- self.h = h
24
- self.convs1 = nn.ModuleList(
25
- [
26
- weight_norm(
27
- Conv1d(
28
- channels,
29
- channels,
30
- kernel_size,
31
- 1,
32
- dilation=dilation[0],
33
- padding=get_padding(kernel_size, dilation[0]),
34
- )
35
- ),
36
- weight_norm(
37
- Conv1d(
38
- channels,
39
- channels,
40
- kernel_size,
41
- 1,
42
- dilation=dilation[1],
43
- padding=get_padding(kernel_size, dilation[1]),
44
- )
45
- ),
46
- weight_norm(
47
- Conv1d(
48
- channels,
49
- channels,
50
- kernel_size,
51
- 1,
52
- dilation=dilation[2],
53
- padding=get_padding(kernel_size, dilation[2]),
54
- )
55
- ),
56
- ]
57
- )
58
- self.convs1.apply(init_weights)
59
-
60
- self.convs2 = nn.ModuleList(
61
- [
62
- weight_norm(
63
- Conv1d(
64
- channels,
65
- channels,
66
- kernel_size,
67
- 1,
68
- dilation=1,
69
- padding=get_padding(kernel_size, 1),
70
- )
71
- ),
72
- weight_norm(
73
- Conv1d(
74
- channels,
75
- channels,
76
- kernel_size,
77
- 1,
78
- dilation=1,
79
- padding=get_padding(kernel_size, 1),
80
- )
81
- ),
82
- weight_norm(
83
- Conv1d(
84
- channels,
85
- channels,
86
- kernel_size,
87
- 1,
88
- dilation=1,
89
- padding=get_padding(kernel_size, 1),
90
- )
91
- ),
92
- ]
93
- )
94
- self.convs2.apply(init_weights)
95
-
96
- def forward(self, x):
97
- for c1, c2 in zip(self.convs1, self.convs2):
98
- xt = F.leaky_relu(x, LRELU_SLOPE)
99
- xt = c1(xt)
100
- xt = F.leaky_relu(xt, LRELU_SLOPE)
101
- xt = c2(xt)
102
- x = xt + x
103
- return x
104
-
105
- def remove_weight_norm(self):
106
- for l in self.convs1:
107
- remove_weight_norm(l)
108
- for l in self.convs2:
109
- remove_weight_norm(l)
110
-
111
-
112
- class Generator(torch.nn.Module):
113
- def __init__(self, h):
114
- super(Generator, self).__init__()
115
- self.h = h
116
- self.num_kernels = len(h.resblock_kernel_sizes)
117
- self.num_upsamples = len(h.upsample_rates)
118
- self.conv_pre = weight_norm(
119
- Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)
120
- )
121
- resblock = ResBlock
122
-
123
- self.ups = nn.ModuleList()
124
- for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
125
- self.ups.append(
126
- weight_norm(
127
- ConvTranspose1d(
128
- h.upsample_initial_channel // (2**i),
129
- h.upsample_initial_channel // (2 ** (i + 1)),
130
- k,
131
- u,
132
- padding=(k - u) // 2,
133
- )
134
- )
135
- )
136
-
137
- self.resblocks = nn.ModuleList()
138
- for i in range(len(self.ups)):
139
- ch = h.upsample_initial_channel // (2 ** (i + 1))
140
- for j, (k, d) in enumerate(
141
- zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
142
- ):
143
- self.resblocks.append(resblock(h, ch, k, d))
144
-
145
- self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
146
- self.ups.apply(init_weights)
147
- self.conv_post.apply(init_weights)
148
-
149
- def forward(self, x):
150
- x = self.conv_pre(x)
151
- for i in range(self.num_upsamples):
152
- x = F.leaky_relu(x, LRELU_SLOPE)
153
- x = self.ups[i](x)
154
- xs = None
155
- for j in range(self.num_kernels):
156
- if xs is None:
157
- xs = self.resblocks[i * self.num_kernels + j](x)
158
- else:
159
- xs += self.resblocks[i * self.num_kernels + j](x)
160
- x = xs / self.num_kernels
161
- x = F.leaky_relu(x)
162
- x = self.conv_post(x)
163
- x = torch.tanh(x)
164
-
165
- return x
166
-
167
- def remove_weight_norm(self):
168
- # print("Removing weight norm...")
169
- for l in self.ups:
170
- remove_weight_norm(l)
171
- for l in self.resblocks:
172
- l.remove_weight_norm()
173
- remove_weight_norm(self.conv_pre)
174
- remove_weight_norm(self.conv_post)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/midas_net.py DELETED
@@ -1,76 +0,0 @@
1
- """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
- This file contains code that is adapted from
3
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
- """
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .base_model import BaseModel
9
- from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
10
-
11
-
12
- class MidasNet(BaseModel):
13
- """Network for monocular depth estimation.
14
- """
15
-
16
- def __init__(self, path=None, features=256, non_negative=True):
17
- """Init.
18
-
19
- Args:
20
- path (str, optional): Path to saved model. Defaults to None.
21
- features (int, optional): Number of features. Defaults to 256.
22
- backbone (str, optional): Backbone network for encoder. Defaults to resnet50
23
- """
24
- print("Loading weights: ", path)
25
-
26
- super(MidasNet, self).__init__()
27
-
28
- use_pretrained = False if path is None else True
29
-
30
- self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
31
-
32
- self.scratch.refinenet4 = FeatureFusionBlock(features)
33
- self.scratch.refinenet3 = FeatureFusionBlock(features)
34
- self.scratch.refinenet2 = FeatureFusionBlock(features)
35
- self.scratch.refinenet1 = FeatureFusionBlock(features)
36
-
37
- self.scratch.output_conv = nn.Sequential(
38
- nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
39
- Interpolate(scale_factor=2, mode="bilinear"),
40
- nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
41
- nn.ReLU(True),
42
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
43
- nn.ReLU(True) if non_negative else nn.Identity(),
44
- )
45
-
46
- if path:
47
- self.load(path)
48
-
49
- def forward(self, x):
50
- """Forward pass.
51
-
52
- Args:
53
- x (tensor): input data (image)
54
-
55
- Returns:
56
- tensor: depth
57
- """
58
-
59
- layer_1 = self.pretrained.layer1(x)
60
- layer_2 = self.pretrained.layer2(layer_1)
61
- layer_3 = self.pretrained.layer3(layer_2)
62
- layer_4 = self.pretrained.layer4(layer_3)
63
-
64
- layer_1_rn = self.scratch.layer1_rn(layer_1)
65
- layer_2_rn = self.scratch.layer2_rn(layer_2)
66
- layer_3_rn = self.scratch.layer3_rn(layer_3)
67
- layer_4_rn = self.scratch.layer4_rn(layer_4)
68
-
69
- path_4 = self.scratch.refinenet4(layer_4_rn)
70
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
71
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
72
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
73
-
74
- out = self.scratch.output_conv(path_1)
75
-
76
- return torch.squeeze(out, dim=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AILab-CVC/EvalCrafter/src/auto_leaderboard/model_metadata_type.py DELETED
@@ -1,30 +0,0 @@
1
- from dataclasses import dataclass
2
- from enum import Enum
3
- import glob
4
- import json
5
- import os
6
- from typing import Dict, List
7
-
8
- from ..utils_display import AutoEvalColumn
9
-
10
- @dataclass
11
- class ModelInfo:
12
- name: str
13
- symbol: str # emoji
14
-
15
- model_type_symbols = {
16
- "LLM": "🟢",
17
- "ImageLLM": "🔶",
18
- "VideoLLM": "⭕",
19
- "Other": "🟦",
20
- }
21
-
22
- class ModelType(Enum):
23
- PT = ModelInfo(name="LLM", symbol="🟢")
24
- FT = ModelInfo(name="ImageLLM", symbol="🔶")
25
- IFT = ModelInfo(name="VideoLLM", symbol="⭕")
26
- RL = ModelInfo(name="Other", symbol="🟦")
27
-
28
- def to_str(self, separator = " "):
29
- return f"{self.value.symbol}{separator}{self.value.name}"
30
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZ2H/06-Streamlit-NLP-Image-Semantic-Search-Images/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 🖼️StreamlitNLUImageSemanticSearch
3
- emoji: 🔍
4
- colorFrom: blue
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.10.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZ2H/Gradio-Multilingual-ImageToOCR/app.py DELETED
@@ -1,54 +0,0 @@
1
- import pandas as pd
2
- import PIL
3
- from PIL import Image
4
- from PIL import ImageDraw
5
- import gradio as gr
6
- import torch
7
- import easyocr
8
-
9
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png', 'english.png')
10
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg', 'chinese.jpg')
11
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg', 'japanese.jpg')
12
- torch.hub.download_url_to_file('https://i.imgur.com/mwQFd7G.jpeg', 'Hindi.jpeg')
13
-
14
- def draw_boxes(image, bounds, color='yellow', width=2):
15
- draw = ImageDraw.Draw(image)
16
- for bound in bounds:
17
- p0, p1, p2, p3 = bound[0]
18
- draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
19
- return image
20
-
21
- def inference(img, lang):
22
- reader = easyocr.Reader(lang)
23
- bounds = reader.readtext(img.name)
24
- im = PIL.Image.open(img.name)
25
- draw_boxes(im, bounds)
26
- im.save('result.jpg')
27
- return ['result.jpg', pd.DataFrame(bounds).iloc[: , 1:]]
28
-
29
- title = 'Image To Optical Character Recognition'
30
- description = 'Multilingual OCR which works conveniently on all devices in multiple languages.'
31
- article = "<p style='text-align: center'></p>"
32
- examples = [['english.png',['en']],['chinese.jpg',['ch_sim', 'en']],['japanese.jpg',['ja', 'en']],['Hindi.jpeg',['hi', 'en']]]
33
- css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
34
- choices = [
35
- "ch_sim",
36
- "ch_tra",
37
- "de",
38
- "en",
39
- "es",
40
- "ja",
41
- "hi",
42
- "ru"
43
- ]
44
- gr.Interface(
45
- inference,
46
- [gr.inputs.Image(type='file', label='Input'),gr.inputs.CheckboxGroup(choices, type="value", default=['en'], label='language')],
47
- [gr.outputs.Image(type='file', label='Output'), gr.outputs.Dataframe(headers=['text', 'confidence'])],
48
- title=title,
49
- description=description,
50
- article=article,
51
- examples=examples,
52
- css=css,
53
- enable_queue=True
54
- ).launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/g4f/Provider/Providers/AiService.py DELETED
@@ -1,40 +0,0 @@
1
- import os
2
- import requests
3
- from ...typing import get_type_hints
4
-
5
- url = "https://aiservice.vercel.app/api/chat/answer"
6
- model = ['gpt-3.5-turbo']
7
- supports_stream = False
8
- needs_auth = False
9
-
10
-
11
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
12
- base = ''
13
- for message in messages:
14
- base += '%s: %s\n' % (message['role'], message['content'])
15
- base += 'assistant:'
16
-
17
- headers = {
18
- "accept": "*/*",
19
- "content-type": "text/plain;charset=UTF-8",
20
- "sec-fetch-dest": "empty",
21
- "sec-fetch-mode": "cors",
22
- "sec-fetch-site": "same-origin",
23
- "Referer": "https://aiservice.vercel.app/chat",
24
- }
25
- data = {
26
- "input": base
27
- }
28
- response = requests.post(url, headers=headers, json=data)
29
- if response.status_code == 200:
30
- _json = response.json()
31
- yield _json['data']
32
- else:
33
- print(f"Error Occurred::{response.status_code}")
34
- return None
35
-
36
-
37
-
38
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
39
- '(%s)' % ', '.join(
40
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import ColorInputBase from './ColorInputBase';
2
-
3
- export default function (
4
- config?: ColorInputBase.IConfig
5
- ): ColorInputBase;
 
 
 
 
 
 
spaces/Aki004/herta-so-vits/resample.py DELETED
@@ -1,48 +0,0 @@
1
- import os
2
- import argparse
3
- import librosa
4
- import numpy as np
5
- from multiprocessing import Pool, cpu_count
6
- from scipy.io import wavfile
7
- from tqdm import tqdm
8
-
9
-
10
- def process(item):
11
- spkdir, wav_name, args = item
12
- # speaker 's5', 'p280', 'p315' are excluded,
13
- speaker = spkdir.replace("\\", "/").split("/")[-1]
14
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
15
- if os.path.exists(wav_path) and '.wav' in wav_path:
16
- os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
17
- wav, sr = librosa.load(wav_path, sr=None)
18
- wav, _ = librosa.effects.trim(wav, top_db=20)
19
- peak = np.abs(wav).max()
20
- if peak > 1.0:
21
- wav = 0.98 * wav / peak
22
- wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
23
- wav2 /= max(wav2.max(), -wav2.min())
24
- save_name = wav_name
25
- save_path2 = os.path.join(args.out_dir2, speaker, save_name)
26
- wavfile.write(
27
- save_path2,
28
- args.sr2,
29
- (wav2 * np.iinfo(np.int16).max).astype(np.int16)
30
- )
31
-
32
-
33
-
34
- if __name__ == "__main__":
35
- parser = argparse.ArgumentParser()
36
- parser.add_argument("--sr2", type=int, default=44100, help="sampling rate")
37
- parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
38
- parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir")
39
- args = parser.parse_args()
40
- processs = 30 if cpu_count() > 60 else (cpu_count()-2 if cpu_count() > 4 else 1)
41
- pool = Pool(processes=processs)
42
-
43
- for speaker in os.listdir(args.in_dir):
44
- spk_dir = os.path.join(args.in_dir, speaker)
45
- if os.path.isdir(spk_dir):
46
- print(spk_dir)
47
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
48
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexWang/lama/saicinpainting/training/modules/squeeze_excitation.py DELETED
@@ -1,20 +0,0 @@
1
- import torch.nn as nn
2
-
3
-
4
- class SELayer(nn.Module):
5
- def __init__(self, channel, reduction=16):
6
- super(SELayer, self).__init__()
7
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
8
- self.fc = nn.Sequential(
9
- nn.Linear(channel, channel // reduction, bias=False),
10
- nn.ReLU(inplace=True),
11
- nn.Linear(channel // reduction, channel, bias=False),
12
- nn.Sigmoid()
13
- )
14
-
15
- def forward(self, x):
16
- b, c, _, _ = x.size()
17
- y = self.avg_pool(x).view(b, c)
18
- y = self.fc(y).view(b, c, 1, 1)
19
- res = x * y.expand_as(x)
20
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AmrElsayeh/Interior_style_detector/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Interior Style Detector
3
- emoji: 👀
4
- colorFrom: blue
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py DELETED
@@ -1,1007 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- """Network architectures from the paper
12
- "Analyzing and Improving the Image Quality of StyleGAN".
13
- Matches the original implementation of configs E-F by Karras et al. at
14
- https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
15
-
16
- import numpy as np
17
- import torch
18
- from torch_utils import misc
19
- from torch_utils import persistence
20
- from torch_utils.ops import conv2d_resample
21
- from torch_utils.ops import upfirdn2d
22
- from torch_utils.ops import bias_act
23
- from torch_utils.ops import fma
24
-
25
- # ----------------------------------------------------------------------------
26
-
27
-
28
- @misc.profiled_function
29
- def normalize_2nd_moment(x, dim=1, eps=1e-8):
30
- return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
31
-
32
- # ----------------------------------------------------------------------------
33
-
34
-
35
- @misc.profiled_function
36
- def modulated_conv2d(
37
- # Input tensor of shape [batch_size, in_channels, in_height, in_width].
38
- x,
39
- # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
40
- weight,
41
- # Modulation coefficients of shape [batch_size, in_channels].
42
- styles,
43
- noise=None, # Optional noise tensor to add to the output activations.
44
- up=1, # Integer upsampling factor.
45
- down=1, # Integer downsampling factor.
46
- padding=0, # Padding with respect to the upsampled image.
47
- # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
48
- resample_filter=None,
49
- demodulate=True, # Apply weight demodulation?
50
- # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
51
- flip_weight=True,
52
- # Perform modulation, convolution, and demodulation as a single fused operation?
53
- fused_modconv=True,
54
- ):
55
- batch_size = x.shape[0]
56
- out_channels, in_channels, kh, kw = weight.shape
57
- misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
58
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
59
- misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
60
-
61
- # Pre-normalize inputs to avoid FP16 overflow.
62
- if x.dtype == torch.float16 and demodulate:
63
- weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
64
- weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
65
- styles = styles / \
66
- styles.norm(float('inf'), dim=1, keepdim=True) # max_I
67
-
68
- # Calculate per-sample weights and demodulation coefficients.
69
- w = None
70
- dcoefs = None
71
- if demodulate or fused_modconv:
72
- w = weight.unsqueeze(0) # [NOIkk]
73
- w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
74
- if demodulate:
75
- dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
76
- if demodulate and fused_modconv:
77
- w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
78
-
79
- # Execute by scaling the activations before and after the convolution.
80
- if not fused_modconv:
81
- x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
82
- x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
83
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
84
- if demodulate and noise is not None:
85
- x = fma.fma(x, dcoefs.to(x.dtype).reshape(
86
- batch_size, -1, 1, 1), noise.to(x.dtype))
87
- elif demodulate:
88
- x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
89
- elif noise is not None:
90
- x = x.add_(noise.to(x.dtype))
91
- return x
92
-
93
- # Execute as one fused op using grouped convolution.
94
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
95
- batch_size = int(batch_size)
96
- misc.assert_shape(x, [batch_size, in_channels, None, None])
97
- x = x.reshape(1, -1, *x.shape[2:])
98
- w = w.reshape(-1, in_channels, kh, kw)
99
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
100
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
101
- x = x.reshape(batch_size, -1, *x.shape[2:])
102
- if noise is not None:
103
- x = x.add_(noise)
104
- return x
105
-
106
- # ----------------------------------------------------------------------------
107
-
108
-
109
- @persistence.persistent_class
110
- class FullyConnectedLayer(torch.nn.Module):
111
- def __init__(self,
112
- in_features, # Number of input features.
113
- out_features, # Number of output features.
114
- bias=True, # Apply additive bias before the activation function?
115
- # Activation function: 'relu', 'lrelu', etc.
116
- activation='linear',
117
- lr_multiplier=1, # Learning rate multiplier.
118
- bias_init=0, # Initial value for the additive bias.
119
- ):
120
- super().__init__()
121
- self.in_features = in_features
122
- self.out_features = out_features
123
- self.activation = activation
124
- self.weight = torch.nn.Parameter(torch.randn(
125
- [out_features, in_features]) / lr_multiplier)
126
- self.bias = torch.nn.Parameter(torch.full(
127
- [out_features], np.float32(bias_init))) if bias else None
128
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
129
- self.bias_gain = lr_multiplier
130
-
131
- def forward(self, x):
132
- w = self.weight.to(x.dtype) * self.weight_gain
133
- b = self.bias
134
- if b is not None:
135
- b = b.to(x.dtype)
136
- if self.bias_gain != 1:
137
- b = b * self.bias_gain
138
-
139
- if self.activation == 'linear' and b is not None:
140
- x = torch.addmm(b.unsqueeze(0), x, w.t())
141
- else:
142
- x = x.matmul(w.t())
143
- x = bias_act.bias_act(x, b, act=self.activation)
144
- return x
145
-
146
- def extra_repr(self):
147
- return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
148
-
149
- # ----------------------------------------------------------------------------
150
-
151
-
152
- @persistence.persistent_class
153
- class Conv2dLayer(torch.nn.Module):
154
- def __init__(self,
155
- in_channels, # Number of input channels.
156
- out_channels, # Number of output channels.
157
- # Width and height of the convolution kernel.
158
- kernel_size,
159
- bias=True, # Apply additive bias before the activation function?
160
- # Activation function: 'relu', 'lrelu', etc.
161
- activation='linear',
162
- up=1, # Integer upsampling factor.
163
- down=1, # Integer downsampling factor.
164
- # Low-pass filter to apply when resampling activations.
165
- resample_filter=[1, 3, 3, 1],
166
- # Clamp the output to +-X, None = disable clamping.
167
- conv_clamp=None,
168
- channels_last=False, # Expect the input to have memory_format=channels_last?
169
- trainable=True, # Update the weights of this layer during training?
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.activation = activation
175
- self.up = up
176
- self.down = down
177
- self.conv_clamp = conv_clamp
178
- self.register_buffer(
179
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
180
- self.padding = kernel_size // 2
181
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
182
- self.act_gain = bias_act.activation_funcs[activation].def_gain
183
-
184
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
185
- weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
186
- memory_format=memory_format)
187
- bias = torch.zeros([out_channels]) if bias else None
188
- if trainable:
189
- self.weight = torch.nn.Parameter(weight)
190
- self.bias = torch.nn.Parameter(bias) if bias is not None else None
191
- else:
192
- self.register_buffer('weight', weight)
193
- if bias is not None:
194
- self.register_buffer('bias', bias)
195
- else:
196
- self.bias = None
197
-
198
- def forward(self, x, gain=1):
199
- w = self.weight * self.weight_gain
200
- b = self.bias.to(x.dtype) if self.bias is not None else None
201
- flip_weight = (self.up == 1) # slightly faster
202
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
203
- x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
204
-
205
- act_gain = self.act_gain * gain
206
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
207
- x = bias_act.bias_act(x, b, act=self.activation,
208
- gain=act_gain, clamp=act_clamp)
209
- return x
210
-
211
- def extra_repr(self):
212
- return ' '.join([
213
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
214
- f'up={self.up}, down={self.down}'])
215
-
216
- # ----------------------------------------------------------------------------
217
-
218
-
219
- @persistence.persistent_class
220
- class MappingNetwork(torch.nn.Module):
221
- def __init__(self,
222
- # Input latent (Z) dimensionality, 0 = no latent.
223
- z_dim,
224
- # Conditioning label (C) dimensionality, 0 = no label.
225
- c_dim,
226
- # Intermediate latent (W) dimensionality.
227
- w_dim,
228
- # Number of intermediate latents to output, None = do not broadcast.
229
- num_ws,
230
- num_layers=8, # Number of mapping layers.
231
- # Label embedding dimensionality, None = same as w_dim.
232
- embed_features=None,
233
- # Number of intermediate features in the mapping layers, None = same as w_dim.
234
- layer_features=None,
235
- # Activation function: 'relu', 'lrelu', etc.
236
- activation='lrelu',
237
- # Learning rate multiplier for the mapping layers.
238
- lr_multiplier=0.01,
239
- # Decay for tracking the moving average of W during training, None = do not track.
240
- w_avg_beta=0.998,
241
- ):
242
- super().__init__()
243
- self.z_dim = z_dim
244
- self.c_dim = c_dim
245
- self.w_dim = w_dim
246
- self.num_ws = num_ws
247
- self.num_layers = num_layers
248
- self.w_avg_beta = w_avg_beta
249
-
250
- if embed_features is None:
251
- embed_features = w_dim
252
- if c_dim == 0:
253
- embed_features = 0
254
- if layer_features is None:
255
- layer_features = w_dim
256
- features_list = [z_dim + embed_features] + \
257
- [layer_features] * (num_layers - 1) + [w_dim]
258
-
259
- if c_dim > 0:
260
- self.embed = FullyConnectedLayer(c_dim, embed_features)
261
- for idx in range(num_layers):
262
- in_features = features_list[idx]
263
- out_features = features_list[idx + 1]
264
- layer = FullyConnectedLayer(
265
- in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
266
- setattr(self, f'fc{idx}', layer)
267
-
268
- if num_ws is not None and w_avg_beta is not None:
269
- self.register_buffer('w_avg', torch.zeros([w_dim]))
270
-
271
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
272
- # Embed, normalize, and concat inputs.
273
- x = None
274
- with torch.autograd.profiler.record_function('input'):
275
- if self.z_dim > 0:
276
- misc.assert_shape(z, [None, self.z_dim])
277
- x = normalize_2nd_moment(z.to(torch.float32))
278
- if self.c_dim > 0:
279
- misc.assert_shape(c, [None, self.c_dim])
280
- y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
281
- x = torch.cat([x, y], dim=1) if x is not None else y
282
-
283
- # Main layers.
284
- for idx in range(self.num_layers):
285
- layer = getattr(self, f'fc{idx}')
286
- x = layer(x)
287
-
288
- # Update moving average of W.
289
- if update_emas and self.w_avg_beta is not None:
290
- with torch.autograd.profiler.record_function('update_w_avg'):
291
- self.w_avg.copy_(x.detach().mean(
292
- dim=0).lerp(self.w_avg, self.w_avg_beta))
293
-
294
- # Broadcast.
295
- if self.num_ws is not None:
296
- with torch.autograd.profiler.record_function('broadcast'):
297
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
298
-
299
- # Apply truncation.
300
- if truncation_psi != 1:
301
- with torch.autograd.profiler.record_function('truncate'):
302
- assert self.w_avg_beta is not None
303
- if self.num_ws is None or truncation_cutoff is None:
304
- x = self.w_avg.lerp(x, truncation_psi)
305
- else:
306
- x[:, :truncation_cutoff] = self.w_avg.lerp(
307
- x[:, :truncation_cutoff], truncation_psi)
308
- return x
309
-
310
- def extra_repr(self):
311
- return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
312
-
313
- # ----------------------------------------------------------------------------
314
-
315
-
316
- @persistence.persistent_class
317
- class SynthesisLayer(torch.nn.Module):
318
- def __init__(self,
319
- in_channels, # Number of input channels.
320
- out_channels, # Number of output channels.
321
- # Intermediate latent (W) dimensionality.
322
- w_dim,
323
- resolution, # Resolution of this layer.
324
- kernel_size=3, # Convolution kernel size.
325
- up=1, # Integer upsampling factor.
326
- use_noise=True, # Enable noise input?
327
- # Activation function: 'relu', 'lrelu', etc.
328
- activation='lrelu',
329
- # Low-pass filter to apply when resampling activations.
330
- resample_filter=[1, 3, 3, 1],
331
- # Clamp the output of convolution layers to +-X, None = disable clamping.
332
- conv_clamp=None,
333
- channels_last=False, # Use channels_last format for the weights?
334
- square=False, # default if for rectangle images
335
- ):
336
- super().__init__()
337
- self.in_channels = in_channels
338
- self.out_channels = out_channels
339
- self.w_dim = w_dim
340
- self.resolution = resolution
341
- self.up = up
342
- self.use_noise = use_noise
343
- self.activation = activation
344
- self.conv_clamp = conv_clamp
345
- self.register_buffer(
346
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
347
- self.padding = kernel_size // 2
348
- self.act_gain = bias_act.activation_funcs[activation].def_gain
349
- self.square = square
350
-
351
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
352
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
353
- self.weight = torch.nn.Parameter(torch.randn(
354
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
355
- if use_noise:
356
- if self.square:
357
- self.register_buffer(
358
- 'noise_const', torch.randn([resolution, resolution]))
359
- else:
360
- self.register_buffer('noise_const', torch.randn(
361
- [resolution, resolution // 2]))
362
- self.noise_strength = torch.nn.Parameter(torch.zeros([]))
363
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
364
-
365
- def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
366
- assert noise_mode in ['random', 'const', 'none']
367
- in_resolution = self.resolution // self.up
368
- if self.square:
369
- misc.assert_shape(
370
- x, [None, self.weight.shape[1], in_resolution, in_resolution])
371
- else:
372
- misc.assert_shape(
373
- x, [None, self.weight.shape[1], in_resolution, in_resolution // 2])
374
- styles = self.affine(w)
375
-
376
- noise = None
377
- if self.use_noise and noise_mode == 'random':
378
- if self.square:
379
- noise = torch.randn(
380
- [x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
381
- else:
382
- noise = torch.randn(
383
- [x.shape[0], 1, self.resolution, self.resolution // 2], device=x.device) * self.noise_strength
384
- if self.use_noise and noise_mode == 'const':
385
- noise = self.noise_const * self.noise_strength
386
-
387
- flip_weight = (self.up == 1) # slightly faster
388
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
389
- padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
390
-
391
- act_gain = self.act_gain * gain
392
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
393
- x = bias_act.bias_act(x, self.bias.to(
394
- x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
395
- return x
396
-
397
- def extra_repr(self):
398
- return ' '.join([
399
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
400
- f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
401
-
402
- # ----------------------------------------------------------------------------
403
-
404
-
405
- @persistence.persistent_class
406
- class ToRGBLayer(torch.nn.Module):
407
- def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
408
- super().__init__()
409
- self.in_channels = in_channels
410
- self.out_channels = out_channels
411
- self.w_dim = w_dim
412
- self.conv_clamp = conv_clamp
413
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
414
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
415
- self.weight = torch.nn.Parameter(torch.randn(
416
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
417
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
418
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
419
-
420
- def forward(self, x, w, fused_modconv=True):
421
- styles = self.affine(w) * self.weight_gain
422
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
423
- demodulate=False, fused_modconv=fused_modconv)
424
- x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
425
- return x
426
-
427
- def extra_repr(self):
428
- return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
429
-
430
- # ----------------------------------------------------------------------------
431
-
432
-
433
- @persistence.persistent_class
434
- class SynthesisBlock(torch.nn.Module):
435
- def __init__(self,
436
- # Number of input channels, 0 = first block.
437
- in_channels,
438
- # Number of output channels.
439
- out_channels,
440
- # Intermediate latent (W) dimensionality.
441
- w_dim,
442
- # Resolution of this block.
443
- resolution,
444
- # Number of output color channels.
445
- img_channels,
446
- is_last, # Is this the last block?
447
- # Architecture: 'orig', 'skip', 'resnet'.
448
- architecture='skip',
449
- # Low-pass filter to apply when resampling activations.
450
- resample_filter=[1, 3, 3, 1],
451
- # Clamp the output of convolution layers to +-X, None = disable clamping.
452
- conv_clamp=256,
453
- use_fp16=False, # Use FP16 for this block?
454
- fp16_channels_last=False, # Use channels-last memory format with FP16?
455
- square=False, # default is for rectangle images
456
- # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
457
- fused_modconv_default=True,
458
- # Arguments for SynthesisLayer.
459
- **layer_kwargs,
460
- ):
461
- assert architecture in ['orig', 'skip', 'resnet']
462
- super().__init__()
463
- self.in_channels = in_channels
464
- self.w_dim = w_dim
465
- self.resolution = resolution
466
- self.img_channels = img_channels
467
- self.is_last = is_last
468
- self.architecture = architecture
469
- self.use_fp16 = use_fp16
470
- self.channels_last = (use_fp16 and fp16_channels_last)
471
- self.fused_modconv_default = fused_modconv_default
472
- self.register_buffer(
473
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
474
- self.num_conv = 0
475
- self.num_torgb = 0
476
- self.square = square
477
-
478
- if in_channels == 0:
479
- if self.square:
480
- self.const = torch.nn.Parameter(torch.randn(
481
- [out_channels, resolution, resolution]))
482
- else: # rectangle
483
- self.const = torch.nn.Parameter(torch.randn(
484
- [out_channels, resolution, resolution // 2]))
485
-
486
- if in_channels != 0:
487
- self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
488
- resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
489
- self.num_conv += 1
490
-
491
- self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
492
- conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
493
- self.num_conv += 1
494
-
495
- if is_last or architecture == 'skip':
496
- self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
497
- conv_clamp=conv_clamp, channels_last=self.channels_last)
498
- self.num_torgb += 1
499
-
500
- if in_channels != 0 and architecture == 'resnet':
501
- self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
502
- resample_filter=resample_filter, channels_last=self.channels_last)
503
-
504
- def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
505
- _ = update_emas # unused
506
- misc.assert_shape(
507
- ws, [None, self.num_conv + self.num_torgb, self.w_dim])
508
- w_iter = iter(ws.unbind(dim=1))
509
- if ws.device.type != 'cuda':
510
- force_fp32 = True
511
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
512
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
513
- if fused_modconv is None:
514
- fused_modconv = self.fused_modconv_default
515
- if fused_modconv == 'inference_only':
516
- fused_modconv = (not self.training)
517
-
518
- # Input.
519
- if self.in_channels == 0:
520
- x = self.const.to(dtype=dtype, memory_format=memory_format)
521
- x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
522
- else:
523
- if self.square:
524
- misc.assert_shape(
525
- x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
526
- else: # rectangle
527
- misc.assert_shape(
528
- x, [None, self.in_channels, self.resolution // 2, self.resolution // 4])
529
- x = x.to(dtype=dtype, memory_format=memory_format)
530
-
531
- # Main layers.
532
- if self.in_channels == 0:
533
- x = self.conv1(x, next(w_iter),
534
- fused_modconv=fused_modconv, **layer_kwargs)
535
- elif self.architecture == 'resnet':
536
- y = self.skip(x, gain=np.sqrt(0.5))
537
- x = self.conv0(x, next(w_iter),
538
- fused_modconv=fused_modconv, **layer_kwargs)
539
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
540
- gain=np.sqrt(0.5), **layer_kwargs)
541
- x = y.add_(x)
542
- else:
543
- x = self.conv0(x, next(w_iter),
544
- fused_modconv=fused_modconv, **layer_kwargs)
545
- x = self.conv1(x, next(w_iter),
546
- fused_modconv=fused_modconv, **layer_kwargs)
547
-
548
- # ToRGB.
549
- if img is not None:
550
- if self.square:
551
- misc.assert_shape(
552
- img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
553
- else:
554
- misc.assert_shape(
555
- img, [None, self.img_channels, self.resolution // 2, self.resolution // 4])
556
- img = upfirdn2d.upsample2d(img, self.resample_filter)
557
- if self.is_last or self.architecture == 'skip':
558
- y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
559
- y = y.to(dtype=torch.float32,
560
- memory_format=torch.contiguous_format)
561
- img = img.add_(y) if img is not None else y
562
-
563
- assert x.dtype == dtype
564
- assert img is None or img.dtype == torch.float32
565
- return x, img
566
-
567
- def extra_repr(self):
568
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
569
-
570
- # ----------------------------------------------------------------------------
571
-
572
-
573
- @persistence.persistent_class
574
- class SynthesisNetwork(torch.nn.Module):
575
- def __init__(self,
576
- # Intermediate latent (W) dimensionality.
577
- w_dim,
578
- img_resolution, # Output image resolution.
579
- img_channels, # Number of color channels.
580
- square,
581
- # Overall multiplier for the number of channels.
582
- channel_base=32768,
583
- # Maximum number of channels in any layer.
584
- channel_max=512,
585
- # Use FP16 for the N highest resolutions.
586
- num_fp16_res=4,
587
- **block_kwargs, # Arguments for SynthesisBlock.
588
- ):
589
- assert img_resolution >= 4 and img_resolution & (
590
- img_resolution - 1) == 0
591
- super().__init__()
592
- self.w_dim = w_dim
593
- self.img_resolution = img_resolution
594
- self.img_resolution_log2 = int(np.log2(img_resolution))
595
- self.img_channels = img_channels
596
- self.square = square
597
- self.num_fp16_res = num_fp16_res
598
- self.block_resolutions = [
599
- 2 ** i for i in range(2, self.img_resolution_log2 + 1)]
600
- channels_dict = {res: min(channel_base // res, channel_max)
601
- for res in self.block_resolutions}
602
- fp16_resolution = max(
603
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
604
-
605
- self.num_ws = 0
606
- for res in self.block_resolutions:
607
- in_channels = channels_dict[res // 2] if res > 4 else 0
608
- out_channels = channels_dict[res]
609
- use_fp16 = (res >= fp16_resolution)
610
- is_last = (res == self.img_resolution)
611
- block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
612
- img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, square=square, **block_kwargs)
613
- self.num_ws += block.num_conv
614
- if is_last:
615
- self.num_ws += block.num_torgb
616
- setattr(self, f'b{res}', block)
617
-
618
- def forward(self, ws, **block_kwargs):
619
- block_ws = []
620
- with torch.autograd.profiler.record_function('split_ws'):
621
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
622
- ws = ws.to(torch.float32)
623
- w_idx = 0
624
- for res in self.block_resolutions:
625
- block = getattr(self, f'b{res}')
626
- block_ws.append(
627
- ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
628
- w_idx += block.num_conv
629
-
630
- x = img = None
631
- for res, cur_ws in zip(self.block_resolutions, block_ws):
632
- block = getattr(self, f'b{res}')
633
- x, img = block(x, img, cur_ws, **block_kwargs)
634
- return img
635
-
636
- def extra_repr(self):
637
- return ' '.join([
638
- f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
639
- f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
640
- f'num_fp16_res={self.num_fp16_res:d}'])
641
-
642
- # ----------------------------------------------------------------------------
643
-
644
-
645
- @persistence.persistent_class
646
- class Generator(torch.nn.Module):
647
- def __init__(self,
648
- z_dim, # Input latent (Z) dimensionality.
649
- # Conditioning label (C) dimensionality.
650
- c_dim,
651
- # Intermediate latent (W) dimensionality.
652
- w_dim,
653
- square,
654
- img_resolution, # Output resolution.
655
- img_channels, # Number of output color channels.
656
- mapping_kwargs={}, # Arguments for MappingNetwork.
657
- **synthesis_kwargs, # Arguments for SynthesisNetwork.
658
- ):
659
- super().__init__()
660
- self.z_dim = z_dim
661
- self.c_dim = c_dim
662
- self.w_dim = w_dim
663
- self.square = square
664
- self.img_resolution = img_resolution
665
- self.img_channels = img_channels
666
- self.synthesis = SynthesisNetwork(
667
- w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, square=square, **synthesis_kwargs)
668
- self.num_ws = self.synthesis.num_ws
669
- self.mapping = MappingNetwork(
670
- z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
671
-
672
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
673
- ws = self.mapping(z, c, truncation_psi=truncation_psi,
674
- truncation_cutoff=truncation_cutoff, update_emas=update_emas)
675
- img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
676
- return img
677
-
678
- # ----------------------------------------------------------------------------
679
-
680
-
681
- @persistence.persistent_class
682
- class DiscriminatorBlock(torch.nn.Module):
683
- def __init__(self,
684
- # Number of input channels, 0 = first block.
685
- in_channels,
686
- # Number of intermediate channels.
687
- tmp_channels,
688
- # Number of output channels.
689
- out_channels,
690
- # Resolution of this block.
691
- resolution,
692
- # Number of input color channels.
693
- img_channels,
694
- # Index of the first layer.
695
- first_layer_idx,
696
- # Architecture: 'orig', 'skip', 'resnet'.
697
- architecture='resnet',
698
- # Activation function: 'relu', 'lrelu', etc.
699
- activation='lrelu',
700
- # Low-pass filter to apply when resampling activations.
701
- resample_filter=[1, 3, 3, 1],
702
- # Clamp the output of convolution layers to +-X, None = disable clamping.
703
- conv_clamp=None,
704
- use_fp16=False, # Use FP16 for this block?
705
- fp16_channels_last=False, # Use channels-last memory format with FP16?
706
- # Freeze-D: Number of layers to freeze.
707
- freeze_layers=0,
708
- square=False,
709
- ):
710
- assert in_channels in [0, tmp_channels]
711
- assert architecture in ['orig', 'skip', 'resnet']
712
- super().__init__()
713
- self.in_channels = in_channels
714
- self.resolution = resolution
715
- self.img_channels = img_channels
716
- self.first_layer_idx = first_layer_idx
717
- self.architecture = architecture
718
- self.use_fp16 = use_fp16
719
- self.channels_last = (use_fp16 and fp16_channels_last)
720
- self.register_buffer(
721
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
722
- self.square = square
723
-
724
- self.num_layers = 0
725
-
726
- def trainable_gen():
727
- while True:
728
- layer_idx = self.first_layer_idx + self.num_layers
729
- trainable = (layer_idx >= freeze_layers)
730
- self.num_layers += 1
731
- yield trainable
732
- trainable_iter = trainable_gen()
733
-
734
- if in_channels == 0 or architecture == 'skip':
735
- self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
736
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
737
-
738
- self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
739
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
740
-
741
- self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
742
- trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
743
-
744
- if architecture == 'resnet':
745
- self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
746
- trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
747
-
748
- def forward(self, x, img, force_fp32=False):
749
- if (x if x is not None else img).device.type != 'cuda':
750
- force_fp32 = True
751
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
752
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
753
-
754
- # Input.
755
- if x is not None:
756
- if self.square:
757
- misc.assert_shape(
758
- x, [None, self.in_channels, self.resolution, self.resolution])
759
- else:
760
- misc.assert_shape(
761
- x, [None, self.in_channels, self.resolution, self.resolution // 2])
762
- x = x.to(dtype=dtype, memory_format=memory_format)
763
-
764
- # FromRGB.
765
- if self.in_channels == 0 or self.architecture == 'skip':
766
- if self.square:
767
- misc.assert_shape(
768
- img, [None, self.img_channels, self.resolution, self.resolution])
769
- else:
770
- misc.assert_shape(
771
- img, [None, self.img_channels, self.resolution, self.resolution // 2])
772
- img = img.to(dtype=dtype, memory_format=memory_format)
773
- y = self.fromrgb(img)
774
- x = x + y if x is not None else y
775
- img = upfirdn2d.downsample2d(
776
- img, self.resample_filter) if self.architecture == 'skip' else None
777
-
778
- # Main layers.
779
- if self.architecture == 'resnet':
780
- y = self.skip(x, gain=np.sqrt(0.5))
781
- x = self.conv0(x)
782
- x = self.conv1(x, gain=np.sqrt(0.5))
783
- x = y.add_(x)
784
- else:
785
- x = self.conv0(x)
786
- x = self.conv1(x)
787
-
788
- assert x.dtype == dtype
789
- return x, img
790
-
791
- def extra_repr(self):
792
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
793
-
794
- # ----------------------------------------------------------------------------
795
-
796
-
797
- @persistence.persistent_class
798
- class MinibatchStdLayer(torch.nn.Module):
799
- def __init__(self, group_size, num_channels=1):
800
- super().__init__()
801
- self.group_size = group_size
802
- self.num_channels = num_channels
803
-
804
- def forward(self, x):
805
- N, C, H, W = x.shape
806
- with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
807
- G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
808
- N)) if self.group_size is not None else N
809
- F = self.num_channels
810
- c = C // F
811
-
812
- # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
813
- y = x.reshape(G, -1, F, c, H, W)
814
- # [GnFcHW] Subtract mean over group.
815
- y = y - y.mean(dim=0)
816
- # [nFcHW] Calc variance over group.
817
- y = y.square().mean(dim=0)
818
- y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
819
- # [nF] Take average over channels and pixels.
820
- y = y.mean(dim=[2, 3, 4])
821
- y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
822
- # [NFHW] Replicate over group and pixels.
823
- y = y.repeat(G, 1, H, W)
824
- # [NCHW] Append to input as new channels.
825
- x = torch.cat([x, y], dim=1)
826
- return x
827
-
828
- def extra_repr(self):
829
- return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
830
-
831
- # ----------------------------------------------------------------------------
832
-
833
-
834
- @persistence.persistent_class
835
- class DiscriminatorEpilogue(torch.nn.Module):
836
- def __init__(self,
837
- in_channels, # Number of input channels.
838
- # Dimensionality of mapped conditioning label, 0 = no label.
839
- cmap_dim,
840
- resolution, # Resolution of this block.
841
- # Number of input color channels.
842
- img_channels,
843
- # Architecture: 'orig', 'skip', 'resnet'.
844
- architecture='resnet',
845
- # Group size for the minibatch standard deviation layer, None = entire minibatch.
846
- mbstd_group_size=4,
847
- # Number of features for the minibatch standard deviation layer, 0 = disable.
848
- mbstd_num_channels=1,
849
- # Activation function: 'relu', 'lrelu', etc.
850
- activation='lrelu',
851
- # Clamp the output of convolution layers to +-X, None = disable clamping.
852
- conv_clamp=None,
853
- square=False,
854
- ):
855
- assert architecture in ['orig', 'skip', 'resnet']
856
- super().__init__()
857
- self.in_channels = in_channels
858
- self.cmap_dim = cmap_dim
859
- self.resolution = resolution
860
- self.img_channels = img_channels
861
- self.architecture = architecture
862
- self.square = square
863
-
864
- if architecture == 'skip':
865
- self.fromrgb = Conv2dLayer(
866
- img_channels, in_channels, kernel_size=1, activation=activation)
867
- self.mbstd = MinibatchStdLayer(
868
- group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
869
- self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
870
- kernel_size=3, activation=activation, conv_clamp=conv_clamp)
871
-
872
- if self.square:
873
- self.fc = FullyConnectedLayer(
874
- in_channels * (resolution ** 2), in_channels, activation=activation)
875
- else:
876
- self.fc = FullyConnectedLayer(
877
- in_channels * (resolution ** 2 // 2), in_channels, activation=activation)
878
-
879
- self.out = FullyConnectedLayer(
880
- in_channels, 1 if cmap_dim == 0 else cmap_dim)
881
-
882
- def forward(self, x, img, cmap, force_fp32=False):
883
- if self.square:
884
- misc.assert_shape(x, [None, self.in_channels,
885
- self.resolution, self.resolution])
886
- else:
887
- misc.assert_shape(
888
- x, [None, self.in_channels, self.resolution, self.resolution // 2]) # [NCHW]
889
-
890
- _ = force_fp32 # unused
891
- dtype = torch.float32
892
- memory_format = torch.contiguous_format
893
-
894
- # FromRGB.
895
- x = x.to(dtype=dtype, memory_format=memory_format)
896
- if self.architecture == 'skip':
897
- if self.square:
898
- misc.assert_shape(
899
- img, [None, self.img_channels, self.resolution, self.resolution])
900
- else:
901
- misc.assert_shape(
902
- img, [None, self.img_channels, self.resolution, self.resolution // 2])
903
-
904
- img = img.to(dtype=dtype, memory_format=memory_format)
905
- x = x + self.fromrgb(img)
906
-
907
- # Main layers.
908
- if self.mbstd is not None:
909
- x = self.mbstd(x)
910
- x = self.conv(x)
911
- x = self.fc(x.flatten(1))
912
- x = self.out(x)
913
-
914
- # Conditioning.
915
- if self.cmap_dim > 0:
916
- misc.assert_shape(cmap, [None, self.cmap_dim])
917
- x = (x * cmap).sum(dim=1, keepdim=True) * \
918
- (1 / np.sqrt(self.cmap_dim))
919
-
920
- assert x.dtype == dtype
921
- return x
922
-
923
- def extra_repr(self):
924
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
925
-
926
- # ----------------------------------------------------------------------------
927
-
928
-
929
- @persistence.persistent_class
930
- class Discriminator(torch.nn.Module):
931
- def __init__(self,
932
- # Conditioning label (C) dimensionality.
933
- c_dim,
934
- img_resolution, # Input resolution.
935
- # Number of input color channels.
936
- img_channels,
937
- # Architecture: 'orig', 'skip', 'resnet'.
938
- architecture='resnet',
939
- # Overall multiplier for the number of channels.
940
- channel_base=32768,
941
- # Maximum number of channels in any layer.
942
- channel_max=512,
943
- # Use FP16 for the N highest resolutions.
944
- num_fp16_res=4,
945
- # Clamp the output of convolution layers to +-X, None = disable clamping.
946
- conv_clamp=256,
947
- # Dimensionality of mapped conditioning label, None = default.
948
- cmap_dim=None,
949
- square=False, # default for rectangle images
950
- block_kwargs={}, # Arguments for DiscriminatorBlock.
951
- mapping_kwargs={}, # Arguments for MappingNetwork.
952
- # Arguments for DiscriminatorEpilogue.
953
- epilogue_kwargs={},
954
- ):
955
- super().__init__()
956
- self.c_dim = c_dim
957
- self.img_resolution = img_resolution
958
- self.img_resolution_log2 = int(np.log2(img_resolution))
959
- self.img_channels = img_channels
960
- self.square = square
961
- self.block_resolutions = [
962
- 2 ** i for i in range(self.img_resolution_log2, 2, -1)]
963
- channels_dict = {res: min(channel_base // res, channel_max)
964
- for res in self.block_resolutions + [4]}
965
- fp16_resolution = max(
966
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
967
-
968
- if cmap_dim is None:
969
- cmap_dim = channels_dict[4]
970
- if c_dim == 0:
971
- cmap_dim = 0
972
-
973
- common_kwargs = dict(img_channels=img_channels,
974
- architecture=architecture, conv_clamp=conv_clamp)
975
- cur_layer_idx = 0
976
- for res in self.block_resolutions:
977
- in_channels = channels_dict[res] if res < img_resolution else 0
978
- tmp_channels = channels_dict[res]
979
- out_channels = channels_dict[res // 2]
980
- use_fp16 = (res >= fp16_resolution)
981
- block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
982
- first_layer_idx=cur_layer_idx, use_fp16=use_fp16, square=square, **block_kwargs, **common_kwargs)
983
- setattr(self, f'b{res}', block)
984
- cur_layer_idx += block.num_layers
985
- if c_dim > 0:
986
- self.mapping = MappingNetwork(
987
- z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
988
- self.b4 = DiscriminatorEpilogue(
989
- channels_dict[4], cmap_dim=cmap_dim, resolution=4, square=square, **epilogue_kwargs, **common_kwargs)
990
-
991
- def forward(self, img, c, update_emas=False, **block_kwargs):
992
- _ = update_emas # unused
993
- x = None
994
- for res in self.block_resolutions:
995
- block = getattr(self, f'b{res}')
996
- x, img = block(x, img, **block_kwargs)
997
-
998
- cmap = None
999
- if self.c_dim > 0:
1000
- cmap = self.mapping(None, c)
1001
- x = self.b4(x, img, cmap)
1002
- return x
1003
-
1004
- def extra_repr(self):
1005
- return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
1006
-
1007
- # ----------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andreean/Sentiment-Analysis-Bitcoin/app.py DELETED
@@ -1,34 +0,0 @@
1
- import streamlit as st
2
- import tensorflow as tf
3
- from tensorflow import keras
4
- import pandas as pd
5
- import numpy as np
6
- from PIL import Image
7
-
8
-
9
- from tensorflow.keras.models import load_model
10
-
11
- st.set_page_config(page_title = 'Sentiment Analysis Bitcoin',
12
- initial_sidebar_state = "expanded",
13
- menu_items = {
14
- 'About' : 'Milestone 2 Fase 2'
15
- })
16
-
17
- image = Image.open('bitcoin.png')
18
-
19
- # load model
20
- model = keras.models.load_model("model_bitcoin")
21
-
22
-
23
- label = ['Negative', 'Neutral', 'Positive']
24
-
25
- st.title("Sentiment Analysis Bitcoin")
26
- st.image(image)
27
-
28
- news_title = st.text_input('Enter a Tweet Bitcoin')
29
- new_data = pd.DataFrame([news_title])
30
- res = model.predict(new_data)
31
- res = res.argmax()
32
- press = st.button('Predict')
33
- if press:
34
- st.title(label[res])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anustup/NS_AI_LABS/README.md DELETED
@@ -1,66 +0,0 @@
1
- ---
2
- title: Whisper Webui
3
- emoji: ⚡
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.3.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
-
15
- # Running Locally
16
-
17
- To run this program locally, first install Python 3.9+ and Git. Then install Pytorch 10.1+ and all the other dependencies:
18
- ```
19
- pip install -r requirements.txt
20
- ```
21
-
22
- Finally, run the full version (no audio length restrictions) of the app:
23
- ```
24
- python app-full.py
25
- ```
26
-
27
- You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments:
28
- ```
29
- python cli.py \
30
- [--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \
31
- [--vad_merge_window VAD_MERGE_WINDOW] \
32
- [--vad_max_merge_size VAD_MAX_MERGE_SIZE] \
33
- [--vad_padding VAD_PADDING] \
34
- [--vad_prompt_window VAD_PROMPT_WINDOW]
35
- ```
36
- In addition, you may also use URL's in addition to file paths as input.
37
- ```
38
- python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM"
39
- ```
40
-
41
- # Docker
42
-
43
- To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU. Then
44
- check out this repository and build an image:
45
- ```
46
- sudo docker build -t whisper-webui:1 .
47
- ```
48
-
49
- You can then start the WebUI with GPU support like so:
50
- ```
51
- sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1
52
- ```
53
-
54
- Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only:
55
- ```
56
- sudo docker run -d -p 7860:7860 whisper-webui:1
57
- ```
58
-
59
- ## Caching
60
-
61
- Note that the models themselves are currently not included in the Docker images, and will be downloaded on the demand.
62
- To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally)
63
- prepopulate the directory with the different Whisper models.
64
- ```
65
- sudo docker run -d --gpus=all -p 7860:7860 --mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper whisper-webui:1
66
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Apex-X/Tm/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Roop
3
- emoji: 📈
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- license: agpl-3.0
11
- duplicated_from: ezioruan/roop
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Apex-X/nono/.github/ISSUE_TEMPLATE/installation.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- name: Installation
3
- about: Platform and installation issues
4
- title: '[Installation]'
5
- labels: 'installation'
6
-
7
- ---
8
-
9
- Please **DO NOT OPEN** platform and installation issues!
10
-
11
- - Check the [troubleshooting](https://github.com/s0md3v/roop/wiki/4.-Troubleshooting) that covers many issues.
12
- - Join our helpful community on [Discord](https://discord.gg/Y9p4ZQ2sB9) for instant help.
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/install.py DELETED
@@ -1,775 +0,0 @@
1
- import errno
2
- import json
3
- import operator
4
- import os
5
- import shutil
6
- import site
7
- from optparse import SUPPRESS_HELP, Values
8
- from typing import List, Optional
9
-
10
- from pip._vendor.rich import print_json
11
-
12
- from pip._internal.cache import WheelCache
13
- from pip._internal.cli import cmdoptions
14
- from pip._internal.cli.cmdoptions import make_target_python
15
- from pip._internal.cli.req_command import (
16
- RequirementCommand,
17
- warn_if_run_as_root,
18
- with_cleanup,
19
- )
20
- from pip._internal.cli.status_codes import ERROR, SUCCESS
21
- from pip._internal.exceptions import CommandError, InstallationError
22
- from pip._internal.locations import get_scheme
23
- from pip._internal.metadata import get_environment
24
- from pip._internal.models.installation_report import InstallationReport
25
- from pip._internal.operations.build.build_tracker import get_build_tracker
26
- from pip._internal.operations.check import ConflictDetails, check_install_conflicts
27
- from pip._internal.req import install_given_reqs
28
- from pip._internal.req.req_install import (
29
- InstallRequirement,
30
- check_legacy_setup_py_options,
31
- )
32
- from pip._internal.utils.compat import WINDOWS
33
- from pip._internal.utils.filesystem import test_writable_dir
34
- from pip._internal.utils.logging import getLogger
35
- from pip._internal.utils.misc import (
36
- check_externally_managed,
37
- ensure_dir,
38
- get_pip_version,
39
- protect_pip_from_modification_on_windows,
40
- write_output,
41
- )
42
- from pip._internal.utils.temp_dir import TempDirectory
43
- from pip._internal.utils.virtualenv import (
44
- running_under_virtualenv,
45
- virtualenv_no_global,
46
- )
47
- from pip._internal.wheel_builder import build, should_build_for_install_command
48
-
49
- logger = getLogger(__name__)
50
-
51
-
52
- class InstallCommand(RequirementCommand):
53
- """
54
- Install packages from:
55
-
56
- - PyPI (and other indexes) using requirement specifiers.
57
- - VCS project urls.
58
- - Local project directories.
59
- - Local or remote source archives.
60
-
61
- pip also supports installing from "requirements files", which provide
62
- an easy way to specify a whole environment to be installed.
63
- """
64
-
65
- usage = """
66
- %prog [options] <requirement specifier> [package-index-options] ...
67
- %prog [options] -r <requirements file> [package-index-options] ...
68
- %prog [options] [-e] <vcs project url> ...
69
- %prog [options] [-e] <local project path> ...
70
- %prog [options] <archive url/path> ..."""
71
-
72
- def add_options(self) -> None:
73
- self.cmd_opts.add_option(cmdoptions.requirements())
74
- self.cmd_opts.add_option(cmdoptions.constraints())
75
- self.cmd_opts.add_option(cmdoptions.no_deps())
76
- self.cmd_opts.add_option(cmdoptions.pre())
77
-
78
- self.cmd_opts.add_option(cmdoptions.editable())
79
- self.cmd_opts.add_option(
80
- "--dry-run",
81
- action="store_true",
82
- dest="dry_run",
83
- default=False,
84
- help=(
85
- "Don't actually install anything, just print what would be. "
86
- "Can be used in combination with --ignore-installed "
87
- "to 'resolve' the requirements."
88
- ),
89
- )
90
- self.cmd_opts.add_option(
91
- "-t",
92
- "--target",
93
- dest="target_dir",
94
- metavar="dir",
95
- default=None,
96
- help=(
97
- "Install packages into <dir>. "
98
- "By default this will not replace existing files/folders in "
99
- "<dir>. Use --upgrade to replace existing packages in <dir> "
100
- "with new versions."
101
- ),
102
- )
103
- cmdoptions.add_target_python_options(self.cmd_opts)
104
-
105
- self.cmd_opts.add_option(
106
- "--user",
107
- dest="use_user_site",
108
- action="store_true",
109
- help=(
110
- "Install to the Python user install directory for your "
111
- "platform. Typically ~/.local/, or %APPDATA%\\Python on "
112
- "Windows. (See the Python documentation for site.USER_BASE "
113
- "for full details.)"
114
- ),
115
- )
116
- self.cmd_opts.add_option(
117
- "--no-user",
118
- dest="use_user_site",
119
- action="store_false",
120
- help=SUPPRESS_HELP,
121
- )
122
- self.cmd_opts.add_option(
123
- "--root",
124
- dest="root_path",
125
- metavar="dir",
126
- default=None,
127
- help="Install everything relative to this alternate root directory.",
128
- )
129
- self.cmd_opts.add_option(
130
- "--prefix",
131
- dest="prefix_path",
132
- metavar="dir",
133
- default=None,
134
- help=(
135
- "Installation prefix where lib, bin and other top-level "
136
- "folders are placed. Note that the resulting installation may "
137
- "contain scripts and other resources which reference the "
138
- "Python interpreter of pip, and not that of ``--prefix``. "
139
- "See also the ``--python`` option if the intention is to "
140
- "install packages into another (possibly pip-free) "
141
- "environment."
142
- ),
143
- )
144
-
145
- self.cmd_opts.add_option(cmdoptions.src())
146
-
147
- self.cmd_opts.add_option(
148
- "-U",
149
- "--upgrade",
150
- dest="upgrade",
151
- action="store_true",
152
- help=(
153
- "Upgrade all specified packages to the newest available "
154
- "version. The handling of dependencies depends on the "
155
- "upgrade-strategy used."
156
- ),
157
- )
158
-
159
- self.cmd_opts.add_option(
160
- "--upgrade-strategy",
161
- dest="upgrade_strategy",
162
- default="only-if-needed",
163
- choices=["only-if-needed", "eager"],
164
- help=(
165
- "Determines how dependency upgrading should be handled "
166
- "[default: %default]. "
167
- '"eager" - dependencies are upgraded regardless of '
168
- "whether the currently installed version satisfies the "
169
- "requirements of the upgraded package(s). "
170
- '"only-if-needed" - are upgraded only when they do not '
171
- "satisfy the requirements of the upgraded package(s)."
172
- ),
173
- )
174
-
175
- self.cmd_opts.add_option(
176
- "--force-reinstall",
177
- dest="force_reinstall",
178
- action="store_true",
179
- help="Reinstall all packages even if they are already up-to-date.",
180
- )
181
-
182
- self.cmd_opts.add_option(
183
- "-I",
184
- "--ignore-installed",
185
- dest="ignore_installed",
186
- action="store_true",
187
- help=(
188
- "Ignore the installed packages, overwriting them. "
189
- "This can break your system if the existing package "
190
- "is of a different version or was installed "
191
- "with a different package manager!"
192
- ),
193
- )
194
-
195
- self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
196
- self.cmd_opts.add_option(cmdoptions.no_build_isolation())
197
- self.cmd_opts.add_option(cmdoptions.use_pep517())
198
- self.cmd_opts.add_option(cmdoptions.no_use_pep517())
199
- self.cmd_opts.add_option(cmdoptions.check_build_deps())
200
- self.cmd_opts.add_option(cmdoptions.override_externally_managed())
201
-
202
- self.cmd_opts.add_option(cmdoptions.config_settings())
203
- self.cmd_opts.add_option(cmdoptions.global_options())
204
-
205
- self.cmd_opts.add_option(
206
- "--compile",
207
- action="store_true",
208
- dest="compile",
209
- default=True,
210
- help="Compile Python source files to bytecode",
211
- )
212
-
213
- self.cmd_opts.add_option(
214
- "--no-compile",
215
- action="store_false",
216
- dest="compile",
217
- help="Do not compile Python source files to bytecode",
218
- )
219
-
220
- self.cmd_opts.add_option(
221
- "--no-warn-script-location",
222
- action="store_false",
223
- dest="warn_script_location",
224
- default=True,
225
- help="Do not warn when installing scripts outside PATH",
226
- )
227
- self.cmd_opts.add_option(
228
- "--no-warn-conflicts",
229
- action="store_false",
230
- dest="warn_about_conflicts",
231
- default=True,
232
- help="Do not warn about broken dependencies",
233
- )
234
- self.cmd_opts.add_option(cmdoptions.no_binary())
235
- self.cmd_opts.add_option(cmdoptions.only_binary())
236
- self.cmd_opts.add_option(cmdoptions.prefer_binary())
237
- self.cmd_opts.add_option(cmdoptions.require_hashes())
238
- self.cmd_opts.add_option(cmdoptions.progress_bar())
239
- self.cmd_opts.add_option(cmdoptions.root_user_action())
240
-
241
- index_opts = cmdoptions.make_option_group(
242
- cmdoptions.index_group,
243
- self.parser,
244
- )
245
-
246
- self.parser.insert_option_group(0, index_opts)
247
- self.parser.insert_option_group(0, self.cmd_opts)
248
-
249
- self.cmd_opts.add_option(
250
- "--report",
251
- dest="json_report_file",
252
- metavar="file",
253
- default=None,
254
- help=(
255
- "Generate a JSON file describing what pip did to install "
256
- "the provided requirements. "
257
- "Can be used in combination with --dry-run and --ignore-installed "
258
- "to 'resolve' the requirements. "
259
- "When - is used as file name it writes to stdout. "
260
- "When writing to stdout, please combine with the --quiet option "
261
- "to avoid mixing pip logging output with JSON output."
262
- ),
263
- )
264
-
265
- @with_cleanup
266
- def run(self, options: Values, args: List[str]) -> int:
267
- if options.use_user_site and options.target_dir is not None:
268
- raise CommandError("Can not combine '--user' and '--target'")
269
-
270
- # Check whether the environment we're installing into is externally
271
- # managed, as specified in PEP 668. Specifying --root, --target, or
272
- # --prefix disables the check, since there's no reliable way to locate
273
- # the EXTERNALLY-MANAGED file for those cases. An exception is also
274
- # made specifically for "--dry-run --report" for convenience.
275
- installing_into_current_environment = (
276
- not (options.dry_run and options.json_report_file)
277
- and options.root_path is None
278
- and options.target_dir is None
279
- and options.prefix_path is None
280
- )
281
- if (
282
- installing_into_current_environment
283
- and not options.override_externally_managed
284
- ):
285
- check_externally_managed()
286
-
287
- upgrade_strategy = "to-satisfy-only"
288
- if options.upgrade:
289
- upgrade_strategy = options.upgrade_strategy
290
-
291
- cmdoptions.check_dist_restriction(options, check_target=True)
292
-
293
- logger.verbose("Using %s", get_pip_version())
294
- options.use_user_site = decide_user_install(
295
- options.use_user_site,
296
- prefix_path=options.prefix_path,
297
- target_dir=options.target_dir,
298
- root_path=options.root_path,
299
- isolated_mode=options.isolated_mode,
300
- )
301
-
302
- target_temp_dir: Optional[TempDirectory] = None
303
- target_temp_dir_path: Optional[str] = None
304
- if options.target_dir:
305
- options.ignore_installed = True
306
- options.target_dir = os.path.abspath(options.target_dir)
307
- if (
308
- # fmt: off
309
- os.path.exists(options.target_dir) and
310
- not os.path.isdir(options.target_dir)
311
- # fmt: on
312
- ):
313
- raise CommandError(
314
- "Target path exists but is not a directory, will not continue."
315
- )
316
-
317
- # Create a target directory for using with the target option
318
- target_temp_dir = TempDirectory(kind="target")
319
- target_temp_dir_path = target_temp_dir.path
320
- self.enter_context(target_temp_dir)
321
-
322
- global_options = options.global_options or []
323
-
324
- session = self.get_default_session(options)
325
-
326
- target_python = make_target_python(options)
327
- finder = self._build_package_finder(
328
- options=options,
329
- session=session,
330
- target_python=target_python,
331
- ignore_requires_python=options.ignore_requires_python,
332
- )
333
- build_tracker = self.enter_context(get_build_tracker())
334
-
335
- directory = TempDirectory(
336
- delete=not options.no_clean,
337
- kind="install",
338
- globally_managed=True,
339
- )
340
-
341
- try:
342
- reqs = self.get_requirements(args, options, finder, session)
343
- check_legacy_setup_py_options(options, reqs)
344
-
345
- wheel_cache = WheelCache(options.cache_dir)
346
-
347
- # Only when installing is it permitted to use PEP 660.
348
- # In other circumstances (pip wheel, pip download) we generate
349
- # regular (i.e. non editable) metadata and wheels.
350
- for req in reqs:
351
- req.permit_editable_wheels = True
352
-
353
- preparer = self.make_requirement_preparer(
354
- temp_build_dir=directory,
355
- options=options,
356
- build_tracker=build_tracker,
357
- session=session,
358
- finder=finder,
359
- use_user_site=options.use_user_site,
360
- verbosity=self.verbosity,
361
- )
362
- resolver = self.make_resolver(
363
- preparer=preparer,
364
- finder=finder,
365
- options=options,
366
- wheel_cache=wheel_cache,
367
- use_user_site=options.use_user_site,
368
- ignore_installed=options.ignore_installed,
369
- ignore_requires_python=options.ignore_requires_python,
370
- force_reinstall=options.force_reinstall,
371
- upgrade_strategy=upgrade_strategy,
372
- use_pep517=options.use_pep517,
373
- )
374
-
375
- self.trace_basic_info(finder)
376
-
377
- requirement_set = resolver.resolve(
378
- reqs, check_supported_wheels=not options.target_dir
379
- )
380
-
381
- if options.json_report_file:
382
- report = InstallationReport(requirement_set.requirements_to_install)
383
- if options.json_report_file == "-":
384
- print_json(data=report.to_dict())
385
- else:
386
- with open(options.json_report_file, "w", encoding="utf-8") as f:
387
- json.dump(report.to_dict(), f, indent=2, ensure_ascii=False)
388
-
389
- if options.dry_run:
390
- would_install_items = sorted(
391
- (r.metadata["name"], r.metadata["version"])
392
- for r in requirement_set.requirements_to_install
393
- )
394
- if would_install_items:
395
- write_output(
396
- "Would install %s",
397
- " ".join("-".join(item) for item in would_install_items),
398
- )
399
- return SUCCESS
400
-
401
- try:
402
- pip_req = requirement_set.get_requirement("pip")
403
- except KeyError:
404
- modifying_pip = False
405
- else:
406
- # If we're not replacing an already installed pip,
407
- # we're not modifying it.
408
- modifying_pip = pip_req.satisfied_by is None
409
- protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
410
-
411
- reqs_to_build = [
412
- r
413
- for r in requirement_set.requirements.values()
414
- if should_build_for_install_command(r)
415
- ]
416
-
417
- _, build_failures = build(
418
- reqs_to_build,
419
- wheel_cache=wheel_cache,
420
- verify=True,
421
- build_options=[],
422
- global_options=global_options,
423
- )
424
-
425
- if build_failures:
426
- raise InstallationError(
427
- "Could not build wheels for {}, which is required to "
428
- "install pyproject.toml-based projects".format(
429
- ", ".join(r.name for r in build_failures) # type: ignore
430
- )
431
- )
432
-
433
- to_install = resolver.get_installation_order(requirement_set)
434
-
435
- # Check for conflicts in the package set we're installing.
436
- conflicts: Optional[ConflictDetails] = None
437
- should_warn_about_conflicts = (
438
- not options.ignore_dependencies and options.warn_about_conflicts
439
- )
440
- if should_warn_about_conflicts:
441
- conflicts = self._determine_conflicts(to_install)
442
-
443
- # Don't warn about script install locations if
444
- # --target or --prefix has been specified
445
- warn_script_location = options.warn_script_location
446
- if options.target_dir or options.prefix_path:
447
- warn_script_location = False
448
-
449
- installed = install_given_reqs(
450
- to_install,
451
- global_options,
452
- root=options.root_path,
453
- home=target_temp_dir_path,
454
- prefix=options.prefix_path,
455
- warn_script_location=warn_script_location,
456
- use_user_site=options.use_user_site,
457
- pycompile=options.compile,
458
- )
459
-
460
- lib_locations = get_lib_location_guesses(
461
- user=options.use_user_site,
462
- home=target_temp_dir_path,
463
- root=options.root_path,
464
- prefix=options.prefix_path,
465
- isolated=options.isolated_mode,
466
- )
467
- env = get_environment(lib_locations)
468
-
469
- installed.sort(key=operator.attrgetter("name"))
470
- items = []
471
- for result in installed:
472
- item = result.name
473
- try:
474
- installed_dist = env.get_distribution(item)
475
- if installed_dist is not None:
476
- item = f"{item}-{installed_dist.version}"
477
- except Exception:
478
- pass
479
- items.append(item)
480
-
481
- if conflicts is not None:
482
- self._warn_about_conflicts(
483
- conflicts,
484
- resolver_variant=self.determine_resolver_variant(options),
485
- )
486
-
487
- installed_desc = " ".join(items)
488
- if installed_desc:
489
- write_output(
490
- "Successfully installed %s",
491
- installed_desc,
492
- )
493
- except OSError as error:
494
- show_traceback = self.verbosity >= 1
495
-
496
- message = create_os_error_message(
497
- error,
498
- show_traceback,
499
- options.use_user_site,
500
- )
501
- logger.error(message, exc_info=show_traceback) # noqa
502
-
503
- return ERROR
504
-
505
- if options.target_dir:
506
- assert target_temp_dir
507
- self._handle_target_dir(
508
- options.target_dir, target_temp_dir, options.upgrade
509
- )
510
- if options.root_user_action == "warn":
511
- warn_if_run_as_root()
512
- return SUCCESS
513
-
514
- def _handle_target_dir(
515
- self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
516
- ) -> None:
517
- ensure_dir(target_dir)
518
-
519
- # Checking both purelib and platlib directories for installed
520
- # packages to be moved to target directory
521
- lib_dir_list = []
522
-
523
- # Checking both purelib and platlib directories for installed
524
- # packages to be moved to target directory
525
- scheme = get_scheme("", home=target_temp_dir.path)
526
- purelib_dir = scheme.purelib
527
- platlib_dir = scheme.platlib
528
- data_dir = scheme.data
529
-
530
- if os.path.exists(purelib_dir):
531
- lib_dir_list.append(purelib_dir)
532
- if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
533
- lib_dir_list.append(platlib_dir)
534
- if os.path.exists(data_dir):
535
- lib_dir_list.append(data_dir)
536
-
537
- for lib_dir in lib_dir_list:
538
- for item in os.listdir(lib_dir):
539
- if lib_dir == data_dir:
540
- ddir = os.path.join(data_dir, item)
541
- if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
542
- continue
543
- target_item_dir = os.path.join(target_dir, item)
544
- if os.path.exists(target_item_dir):
545
- if not upgrade:
546
- logger.warning(
547
- "Target directory %s already exists. Specify "
548
- "--upgrade to force replacement.",
549
- target_item_dir,
550
- )
551
- continue
552
- if os.path.islink(target_item_dir):
553
- logger.warning(
554
- "Target directory %s already exists and is "
555
- "a link. pip will not automatically replace "
556
- "links, please remove if replacement is "
557
- "desired.",
558
- target_item_dir,
559
- )
560
- continue
561
- if os.path.isdir(target_item_dir):
562
- shutil.rmtree(target_item_dir)
563
- else:
564
- os.remove(target_item_dir)
565
-
566
- shutil.move(os.path.join(lib_dir, item), target_item_dir)
567
-
568
- def _determine_conflicts(
569
- self, to_install: List[InstallRequirement]
570
- ) -> Optional[ConflictDetails]:
571
- try:
572
- return check_install_conflicts(to_install)
573
- except Exception:
574
- logger.exception(
575
- "Error while checking for conflicts. Please file an issue on "
576
- "pip's issue tracker: https://github.com/pypa/pip/issues/new"
577
- )
578
- return None
579
-
580
- def _warn_about_conflicts(
581
- self, conflict_details: ConflictDetails, resolver_variant: str
582
- ) -> None:
583
- package_set, (missing, conflicting) = conflict_details
584
- if not missing and not conflicting:
585
- return
586
-
587
- parts: List[str] = []
588
- if resolver_variant == "legacy":
589
- parts.append(
590
- "pip's legacy dependency resolver does not consider dependency "
591
- "conflicts when selecting packages. This behaviour is the "
592
- "source of the following dependency conflicts."
593
- )
594
- else:
595
- assert resolver_variant == "2020-resolver"
596
- parts.append(
597
- "pip's dependency resolver does not currently take into account "
598
- "all the packages that are installed. This behaviour is the "
599
- "source of the following dependency conflicts."
600
- )
601
-
602
- # NOTE: There is some duplication here, with commands/check.py
603
- for project_name in missing:
604
- version = package_set[project_name][0]
605
- for dependency in missing[project_name]:
606
- message = (
607
- "{name} {version} requires {requirement}, "
608
- "which is not installed."
609
- ).format(
610
- name=project_name,
611
- version=version,
612
- requirement=dependency[1],
613
- )
614
- parts.append(message)
615
-
616
- for project_name in conflicting:
617
- version = package_set[project_name][0]
618
- for dep_name, dep_version, req in conflicting[project_name]:
619
- message = (
620
- "{name} {version} requires {requirement}, but {you} have "
621
- "{dep_name} {dep_version} which is incompatible."
622
- ).format(
623
- name=project_name,
624
- version=version,
625
- requirement=req,
626
- dep_name=dep_name,
627
- dep_version=dep_version,
628
- you=("you" if resolver_variant == "2020-resolver" else "you'll"),
629
- )
630
- parts.append(message)
631
-
632
- logger.critical("\n".join(parts))
633
-
634
-
635
- def get_lib_location_guesses(
636
- user: bool = False,
637
- home: Optional[str] = None,
638
- root: Optional[str] = None,
639
- isolated: bool = False,
640
- prefix: Optional[str] = None,
641
- ) -> List[str]:
642
- scheme = get_scheme(
643
- "",
644
- user=user,
645
- home=home,
646
- root=root,
647
- isolated=isolated,
648
- prefix=prefix,
649
- )
650
- return [scheme.purelib, scheme.platlib]
651
-
652
-
653
- def site_packages_writable(root: Optional[str], isolated: bool) -> bool:
654
- return all(
655
- test_writable_dir(d)
656
- for d in set(get_lib_location_guesses(root=root, isolated=isolated))
657
- )
658
-
659
-
660
- def decide_user_install(
661
- use_user_site: Optional[bool],
662
- prefix_path: Optional[str] = None,
663
- target_dir: Optional[str] = None,
664
- root_path: Optional[str] = None,
665
- isolated_mode: bool = False,
666
- ) -> bool:
667
- """Determine whether to do a user install based on the input options.
668
-
669
- If use_user_site is False, no additional checks are done.
670
- If use_user_site is True, it is checked for compatibility with other
671
- options.
672
- If use_user_site is None, the default behaviour depends on the environment,
673
- which is provided by the other arguments.
674
- """
675
- # In some cases (config from tox), use_user_site can be set to an integer
676
- # rather than a bool, which 'use_user_site is False' wouldn't catch.
677
- if (use_user_site is not None) and (not use_user_site):
678
- logger.debug("Non-user install by explicit request")
679
- return False
680
-
681
- if use_user_site:
682
- if prefix_path:
683
- raise CommandError(
684
- "Can not combine '--user' and '--prefix' as they imply "
685
- "different installation locations"
686
- )
687
- if virtualenv_no_global():
688
- raise InstallationError(
689
- "Can not perform a '--user' install. User site-packages "
690
- "are not visible in this virtualenv."
691
- )
692
- logger.debug("User install by explicit request")
693
- return True
694
-
695
- # If we are here, user installs have not been explicitly requested/avoided
696
- assert use_user_site is None
697
-
698
- # user install incompatible with --prefix/--target
699
- if prefix_path or target_dir:
700
- logger.debug("Non-user install due to --prefix or --target option")
701
- return False
702
-
703
- # If user installs are not enabled, choose a non-user install
704
- if not site.ENABLE_USER_SITE:
705
- logger.debug("Non-user install because user site-packages disabled")
706
- return False
707
-
708
- # If we have permission for a non-user install, do that,
709
- # otherwise do a user install.
710
- if site_packages_writable(root=root_path, isolated=isolated_mode):
711
- logger.debug("Non-user install because site-packages writeable")
712
- return False
713
-
714
- logger.info(
715
- "Defaulting to user installation because normal site-packages "
716
- "is not writeable"
717
- )
718
- return True
719
-
720
-
721
- def create_os_error_message(
722
- error: OSError, show_traceback: bool, using_user_site: bool
723
- ) -> str:
724
- """Format an error message for an OSError
725
-
726
- It may occur anytime during the execution of the install command.
727
- """
728
- parts = []
729
-
730
- # Mention the error if we are not going to show a traceback
731
- parts.append("Could not install packages due to an OSError")
732
- if not show_traceback:
733
- parts.append(": ")
734
- parts.append(str(error))
735
- else:
736
- parts.append(".")
737
-
738
- # Spilt the error indication from a helper message (if any)
739
- parts[-1] += "\n"
740
-
741
- # Suggest useful actions to the user:
742
- # (1) using user site-packages or (2) verifying the permissions
743
- if error.errno == errno.EACCES:
744
- user_option_part = "Consider using the `--user` option"
745
- permissions_part = "Check the permissions"
746
-
747
- if not running_under_virtualenv() and not using_user_site:
748
- parts.extend(
749
- [
750
- user_option_part,
751
- " or ",
752
- permissions_part.lower(),
753
- ]
754
- )
755
- else:
756
- parts.append(permissions_part)
757
- parts.append(".\n")
758
-
759
- # Suggest the user to enable Long Paths if path length is
760
- # more than 260
761
- if (
762
- WINDOWS
763
- and error.errno == errno.ENOENT
764
- and error.filename
765
- and len(error.filename) > 260
766
- ):
767
- parts.append(
768
- "HINT: This error might have occurred since "
769
- "this system does not have Windows Long Path "
770
- "support enabled. You can find information on "
771
- "how to enable this at "
772
- "https://pip.pypa.io/warnings/enable-long-paths\n"
773
- )
774
-
775
- return "".join(parts).strip() + "\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/__init__.py DELETED
@@ -1,92 +0,0 @@
1
- import collections
2
- import logging
3
- from typing import Generator, List, Optional, Sequence, Tuple
4
-
5
- from pip._internal.utils.logging import indent_log
6
-
7
- from .req_file import parse_requirements
8
- from .req_install import InstallRequirement
9
- from .req_set import RequirementSet
10
-
11
- __all__ = [
12
- "RequirementSet",
13
- "InstallRequirement",
14
- "parse_requirements",
15
- "install_given_reqs",
16
- ]
17
-
18
- logger = logging.getLogger(__name__)
19
-
20
-
21
- class InstallationResult:
22
- def __init__(self, name: str) -> None:
23
- self.name = name
24
-
25
- def __repr__(self) -> str:
26
- return f"InstallationResult(name={self.name!r})"
27
-
28
-
29
- def _validate_requirements(
30
- requirements: List[InstallRequirement],
31
- ) -> Generator[Tuple[str, InstallRequirement], None, None]:
32
- for req in requirements:
33
- assert req.name, f"invalid to-be-installed requirement: {req}"
34
- yield req.name, req
35
-
36
-
37
- def install_given_reqs(
38
- requirements: List[InstallRequirement],
39
- global_options: Sequence[str],
40
- root: Optional[str],
41
- home: Optional[str],
42
- prefix: Optional[str],
43
- warn_script_location: bool,
44
- use_user_site: bool,
45
- pycompile: bool,
46
- ) -> List[InstallationResult]:
47
- """
48
- Install everything in the given list.
49
-
50
- (to be called after having downloaded and unpacked the packages)
51
- """
52
- to_install = collections.OrderedDict(_validate_requirements(requirements))
53
-
54
- if to_install:
55
- logger.info(
56
- "Installing collected packages: %s",
57
- ", ".join(to_install.keys()),
58
- )
59
-
60
- installed = []
61
-
62
- with indent_log():
63
- for req_name, requirement in to_install.items():
64
- if requirement.should_reinstall:
65
- logger.info("Attempting uninstall: %s", req_name)
66
- with indent_log():
67
- uninstalled_pathset = requirement.uninstall(auto_confirm=True)
68
- else:
69
- uninstalled_pathset = None
70
-
71
- try:
72
- requirement.install(
73
- global_options,
74
- root=root,
75
- home=home,
76
- prefix=prefix,
77
- warn_script_location=warn_script_location,
78
- use_user_site=use_user_site,
79
- pycompile=pycompile,
80
- )
81
- except Exception:
82
- # if install did not succeed, rollback previous uninstall
83
- if uninstalled_pathset and not requirement.install_succeeded:
84
- uninstalled_pathset.rollback()
85
- raise
86
- else:
87
- if uninstalled_pathset and requirement.install_succeeded:
88
- uninstalled_pathset.commit()
89
-
90
- installed.append(InstallationResult(req_name))
91
-
92
- return installed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/unpacking.py DELETED
@@ -1,257 +0,0 @@
1
- """Utilities related archives.
2
- """
3
-
4
- import logging
5
- import os
6
- import shutil
7
- import stat
8
- import tarfile
9
- import zipfile
10
- from typing import Iterable, List, Optional
11
- from zipfile import ZipInfo
12
-
13
- from pip._internal.exceptions import InstallationError
14
- from pip._internal.utils.filetypes import (
15
- BZ2_EXTENSIONS,
16
- TAR_EXTENSIONS,
17
- XZ_EXTENSIONS,
18
- ZIP_EXTENSIONS,
19
- )
20
- from pip._internal.utils.misc import ensure_dir
21
-
22
- logger = logging.getLogger(__name__)
23
-
24
-
25
- SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
26
-
27
- try:
28
- import bz2 # noqa
29
-
30
- SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
31
- except ImportError:
32
- logger.debug("bz2 module is not available")
33
-
34
- try:
35
- # Only for Python 3.3+
36
- import lzma # noqa
37
-
38
- SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
39
- except ImportError:
40
- logger.debug("lzma module is not available")
41
-
42
-
43
- def current_umask() -> int:
44
- """Get the current umask which involves having to set it temporarily."""
45
- mask = os.umask(0)
46
- os.umask(mask)
47
- return mask
48
-
49
-
50
- def split_leading_dir(path: str) -> List[str]:
51
- path = path.lstrip("/").lstrip("\\")
52
- if "/" in path and (
53
- ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
54
- ):
55
- return path.split("/", 1)
56
- elif "\\" in path:
57
- return path.split("\\", 1)
58
- else:
59
- return [path, ""]
60
-
61
-
62
- def has_leading_dir(paths: Iterable[str]) -> bool:
63
- """Returns true if all the paths have the same leading path name
64
- (i.e., everything is in one subdirectory in an archive)"""
65
- common_prefix = None
66
- for path in paths:
67
- prefix, rest = split_leading_dir(path)
68
- if not prefix:
69
- return False
70
- elif common_prefix is None:
71
- common_prefix = prefix
72
- elif prefix != common_prefix:
73
- return False
74
- return True
75
-
76
-
77
- def is_within_directory(directory: str, target: str) -> bool:
78
- """
79
- Return true if the absolute path of target is within the directory
80
- """
81
- abs_directory = os.path.abspath(directory)
82
- abs_target = os.path.abspath(target)
83
-
84
- prefix = os.path.commonprefix([abs_directory, abs_target])
85
- return prefix == abs_directory
86
-
87
-
88
- def set_extracted_file_to_default_mode_plus_executable(path: str) -> None:
89
- """
90
- Make file present at path have execute for user/group/world
91
- (chmod +x) is no-op on windows per python docs
92
- """
93
- os.chmod(path, (0o777 & ~current_umask() | 0o111))
94
-
95
-
96
- def zip_item_is_executable(info: ZipInfo) -> bool:
97
- mode = info.external_attr >> 16
98
- # if mode and regular file and any execute permissions for
99
- # user/group/world?
100
- return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
101
-
102
-
103
- def unzip_file(filename: str, location: str, flatten: bool = True) -> None:
104
- """
105
- Unzip the file (with path `filename`) to the destination `location`. All
106
- files are written based on system defaults and umask (i.e. permissions are
107
- not preserved), except that regular file members with any execute
108
- permissions (user, group, or world) have "chmod +x" applied after being
109
- written. Note that for windows, any execute changes using os.chmod are
110
- no-ops per the python docs.
111
- """
112
- ensure_dir(location)
113
- zipfp = open(filename, "rb")
114
- try:
115
- zip = zipfile.ZipFile(zipfp, allowZip64=True)
116
- leading = has_leading_dir(zip.namelist()) and flatten
117
- for info in zip.infolist():
118
- name = info.filename
119
- fn = name
120
- if leading:
121
- fn = split_leading_dir(name)[1]
122
- fn = os.path.join(location, fn)
123
- dir = os.path.dirname(fn)
124
- if not is_within_directory(location, fn):
125
- message = (
126
- "The zip file ({}) has a file ({}) trying to install "
127
- "outside target directory ({})"
128
- )
129
- raise InstallationError(message.format(filename, fn, location))
130
- if fn.endswith("/") or fn.endswith("\\"):
131
- # A directory
132
- ensure_dir(fn)
133
- else:
134
- ensure_dir(dir)
135
- # Don't use read() to avoid allocating an arbitrarily large
136
- # chunk of memory for the file's content
137
- fp = zip.open(name)
138
- try:
139
- with open(fn, "wb") as destfp:
140
- shutil.copyfileobj(fp, destfp)
141
- finally:
142
- fp.close()
143
- if zip_item_is_executable(info):
144
- set_extracted_file_to_default_mode_plus_executable(fn)
145
- finally:
146
- zipfp.close()
147
-
148
-
149
- def untar_file(filename: str, location: str) -> None:
150
- """
151
- Untar the file (with path `filename`) to the destination `location`.
152
- All files are written based on system defaults and umask (i.e. permissions
153
- are not preserved), except that regular file members with any execute
154
- permissions (user, group, or world) have "chmod +x" applied after being
155
- written. Note that for windows, any execute changes using os.chmod are
156
- no-ops per the python docs.
157
- """
158
- ensure_dir(location)
159
- if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
160
- mode = "r:gz"
161
- elif filename.lower().endswith(BZ2_EXTENSIONS):
162
- mode = "r:bz2"
163
- elif filename.lower().endswith(XZ_EXTENSIONS):
164
- mode = "r:xz"
165
- elif filename.lower().endswith(".tar"):
166
- mode = "r"
167
- else:
168
- logger.warning(
169
- "Cannot determine compression type for file %s",
170
- filename,
171
- )
172
- mode = "r:*"
173
- tar = tarfile.open(filename, mode, encoding="utf-8")
174
- try:
175
- leading = has_leading_dir([member.name for member in tar.getmembers()])
176
- for member in tar.getmembers():
177
- fn = member.name
178
- if leading:
179
- fn = split_leading_dir(fn)[1]
180
- path = os.path.join(location, fn)
181
- if not is_within_directory(location, path):
182
- message = (
183
- "The tar file ({}) has a file ({}) trying to install "
184
- "outside target directory ({})"
185
- )
186
- raise InstallationError(message.format(filename, path, location))
187
- if member.isdir():
188
- ensure_dir(path)
189
- elif member.issym():
190
- try:
191
- tar._extract_member(member, path)
192
- except Exception as exc:
193
- # Some corrupt tar files seem to produce this
194
- # (specifically bad symlinks)
195
- logger.warning(
196
- "In the tar file %s the member %s is invalid: %s",
197
- filename,
198
- member.name,
199
- exc,
200
- )
201
- continue
202
- else:
203
- try:
204
- fp = tar.extractfile(member)
205
- except (KeyError, AttributeError) as exc:
206
- # Some corrupt tar files seem to produce this
207
- # (specifically bad symlinks)
208
- logger.warning(
209
- "In the tar file %s the member %s is invalid: %s",
210
- filename,
211
- member.name,
212
- exc,
213
- )
214
- continue
215
- ensure_dir(os.path.dirname(path))
216
- assert fp is not None
217
- with open(path, "wb") as destfp:
218
- shutil.copyfileobj(fp, destfp)
219
- fp.close()
220
- # Update the timestamp (useful for cython compiled files)
221
- tar.utime(member, path)
222
- # member have any execute permissions for user/group/world?
223
- if member.mode & 0o111:
224
- set_extracted_file_to_default_mode_plus_executable(path)
225
- finally:
226
- tar.close()
227
-
228
-
229
- def unpack_file(
230
- filename: str,
231
- location: str,
232
- content_type: Optional[str] = None,
233
- ) -> None:
234
- filename = os.path.realpath(filename)
235
- if (
236
- content_type == "application/zip"
237
- or filename.lower().endswith(ZIP_EXTENSIONS)
238
- or zipfile.is_zipfile(filename)
239
- ):
240
- unzip_file(filename, location, flatten=not filename.endswith(".whl"))
241
- elif (
242
- content_type == "application/x-gzip"
243
- or tarfile.is_tarfile(filename)
244
- or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
245
- ):
246
- untar_file(filename, location)
247
- else:
248
- # FIXME: handle?
249
- # FIXME: magic signatures?
250
- logger.critical(
251
- "Cannot unpack file %s (downloaded from %s, content-type: %s); "
252
- "cannot detect archive format",
253
- filename,
254
- location,
255
- content_type,
256
- )
257
- raise InstallationError(f"Cannot determine archive format of {location}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py DELETED
@@ -1,11 +0,0 @@
1
- from ..common.optim import SGD as optimizer
2
- from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
3
- from ..common.data.coco import dataloader
4
- from ..common.models.retinanet import model
5
- from ..common.train import train
6
-
7
- dataloader.train.mapper.use_instance_mask = False
8
- model.backbone.bottom_up.freeze_at = 2
9
- optimizer.lr = 0.01
10
-
11
- train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
1
- from .mask_rcnn_R_50_FPN_100ep_LSJ import (
2
- dataloader,
3
- lr_multiplier,
4
- model,
5
- optimizer,
6
- train,
7
- )
8
-
9
- train.max_iter *= 2 # 100ep -> 200ep
10
-
11
- lr_multiplier.scheduler.milestones = [
12
- milestone * 2 for milestone in lr_multiplier.scheduler.milestones
13
- ]
14
- lr_multiplier.scheduler.num_updates = train.max_iter
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/samplers/grouped_batch_sampler.py DELETED
@@ -1,47 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import numpy as np
3
- from torch.utils.data.sampler import BatchSampler, Sampler
4
-
5
-
6
- class GroupedBatchSampler(BatchSampler):
7
- """
8
- Wraps another sampler to yield a mini-batch of indices.
9
- It enforces that the batch only contain elements from the same group.
10
- It also tries to provide mini-batches which follows an ordering which is
11
- as close as possible to the ordering from the original sampler.
12
- """
13
-
14
- def __init__(self, sampler, group_ids, batch_size):
15
- """
16
- Args:
17
- sampler (Sampler): Base sampler.
18
- group_ids (list[int]): If the sampler produces indices in range [0, N),
19
- `group_ids` must be a list of `N` ints which contains the group id of each sample.
20
- The group ids must be a set of integers in the range [0, num_groups).
21
- batch_size (int): Size of mini-batch.
22
- """
23
- if not isinstance(sampler, Sampler):
24
- raise ValueError(
25
- "sampler should be an instance of "
26
- "torch.utils.data.Sampler, but got sampler={}".format(sampler)
27
- )
28
- self.sampler = sampler
29
- self.group_ids = np.asarray(group_ids)
30
- assert self.group_ids.ndim == 1
31
- self.batch_size = batch_size
32
- groups = np.unique(self.group_ids).tolist()
33
-
34
- # buffer the indices of each group until batch size is reached
35
- self.buffer_per_group = {k: [] for k in groups}
36
-
37
- def __iter__(self):
38
- for idx in self.sampler:
39
- group_id = self.group_ids[idx]
40
- group_buffer = self.buffer_per_group[group_id]
41
- group_buffer.append(idx)
42
- if len(group_buffer) == self.batch_size:
43
- yield group_buffer[:] # yield a copy of the list
44
- del group_buffer[:]
45
-
46
- def __len__(self):
47
- raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_sampler.py DELETED
@@ -1,111 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import itertools
3
- import math
4
- import operator
5
- import unittest
6
- import torch
7
- from torch.utils import data
8
- from torch.utils.data.sampler import SequentialSampler
9
-
10
- from detectron2.data.build import worker_init_reset_seed
11
- from detectron2.data.common import DatasetFromList, ToIterableDataset
12
- from detectron2.data.samplers import (
13
- GroupedBatchSampler,
14
- InferenceSampler,
15
- RepeatFactorTrainingSampler,
16
- TrainingSampler,
17
- )
18
- from detectron2.utils.env import seed_all_rng
19
-
20
-
21
- class TestGroupedBatchSampler(unittest.TestCase):
22
- def test_missing_group_id(self):
23
- sampler = SequentialSampler(list(range(100)))
24
- group_ids = [1] * 100
25
- samples = GroupedBatchSampler(sampler, group_ids, 2)
26
-
27
- for mini_batch in samples:
28
- self.assertEqual(len(mini_batch), 2)
29
-
30
- def test_groups(self):
31
- sampler = SequentialSampler(list(range(100)))
32
- group_ids = [1, 0] * 50
33
- samples = GroupedBatchSampler(sampler, group_ids, 2)
34
-
35
- for mini_batch in samples:
36
- self.assertEqual((mini_batch[0] + mini_batch[1]) % 2, 0)
37
-
38
-
39
- class TestSamplerDeterministic(unittest.TestCase):
40
- def test_to_iterable(self):
41
- sampler = TrainingSampler(100, seed=10)
42
- gt_output = list(itertools.islice(sampler, 100))
43
- self.assertEqual(set(gt_output), set(range(100)))
44
-
45
- dataset = DatasetFromList(list(range(100)))
46
- dataset = ToIterableDataset(dataset, sampler)
47
- data_loader = data.DataLoader(dataset, num_workers=0, collate_fn=operator.itemgetter(0))
48
-
49
- output = list(itertools.islice(data_loader, 100))
50
- self.assertEqual(output, gt_output)
51
-
52
- data_loader = data.DataLoader(
53
- dataset,
54
- num_workers=2,
55
- collate_fn=operator.itemgetter(0),
56
- worker_init_fn=worker_init_reset_seed,
57
- # reset seed should not affect behavior of TrainingSampler
58
- )
59
- output = list(itertools.islice(data_loader, 100))
60
- # multiple workers should not lead to duplicate or different data
61
- self.assertEqual(output, gt_output)
62
-
63
- def test_training_sampler_seed(self):
64
- seed_all_rng(42)
65
- sampler = TrainingSampler(30)
66
- data = list(itertools.islice(sampler, 65))
67
-
68
- seed_all_rng(42)
69
- sampler = TrainingSampler(30)
70
- seed_all_rng(999) # should be ineffective
71
- data2 = list(itertools.islice(sampler, 65))
72
- self.assertEqual(data, data2)
73
-
74
-
75
- class TestRepeatFactorTrainingSampler(unittest.TestCase):
76
- def test_repeat_factors_from_category_frequency(self):
77
- repeat_thresh = 0.5
78
-
79
- dataset_dicts = [
80
- {"annotations": [{"category_id": 0}, {"category_id": 1}]},
81
- {"annotations": [{"category_id": 0}]},
82
- {"annotations": []},
83
- ]
84
-
85
- rep_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
86
- dataset_dicts, repeat_thresh
87
- )
88
-
89
- expected_rep_factors = torch.tensor([math.sqrt(3 / 2), 1.0, 1.0])
90
- self.assertTrue(torch.allclose(rep_factors, expected_rep_factors))
91
-
92
-
93
- class TestInferenceSampler(unittest.TestCase):
94
- def test_local_indices(self):
95
- sizes = [0, 16, 2, 42]
96
- world_sizes = [5, 2, 3, 4]
97
-
98
- expected_results = [
99
- [range(0) for _ in range(5)],
100
- [range(8), range(8, 16)],
101
- [range(1), range(1, 2), range(0)],
102
- [range(11), range(11, 22), range(22, 32), range(32, 42)],
103
- ]
104
-
105
- for size, world_size, expected_result in zip(sizes, world_sizes, expected_results):
106
- with self.subTest(f"size={size}, world_size={world_size}"):
107
- local_indices = [
108
- InferenceSampler._get_local_indices(size, world_size, r)
109
- for r in range(world_size)
110
- ]
111
- self.assertEqual(local_indices, expected_result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BaiyuS/Real-CUGAN-YZ/upcunet_v3.py DELETED
@@ -1,714 +0,0 @@
1
- import torch
2
- from torch import nn as nn
3
- from torch.nn import functional as F
4
- import os, sys
5
- import numpy as np
6
-
7
- root_path = os.path.abspath('.')
8
- sys.path.append(root_path)
9
-
10
-
11
- class SEBlock(nn.Module):
12
- def __init__(self, in_channels, reduction=8, bias=False):
13
- super(SEBlock, self).__init__()
14
- self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
15
- self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
16
-
17
- def forward(self, x):
18
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
19
- x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
20
- else:
21
- x0 = torch.mean(x, dim=(2, 3), keepdim=True)
22
- x0 = self.conv1(x0)
23
- x0 = F.relu(x0, inplace=True)
24
- x0 = self.conv2(x0)
25
- x0 = torch.sigmoid(x0)
26
- x = torch.mul(x, x0)
27
- return x
28
-
29
- def forward_mean(self, x, x0):
30
- x0 = self.conv1(x0)
31
- x0 = F.relu(x0, inplace=True)
32
- x0 = self.conv2(x0)
33
- x0 = torch.sigmoid(x0)
34
- x = torch.mul(x, x0)
35
- return x
36
-
37
-
38
- class UNetConv(nn.Module):
39
- def __init__(self, in_channels, mid_channels, out_channels, se):
40
- super(UNetConv, self).__init__()
41
- self.conv = nn.Sequential(
42
- nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
43
- nn.LeakyReLU(0.1, inplace=True),
44
- nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
45
- nn.LeakyReLU(0.1, inplace=True),
46
- )
47
- if se:
48
- self.seblock = SEBlock(out_channels, reduction=8, bias=True)
49
- else:
50
- self.seblock = None
51
-
52
- def forward(self, x):
53
- z = self.conv(x)
54
- if self.seblock is not None:
55
- z = self.seblock(z)
56
- return z
57
-
58
-
59
- class UNet1(nn.Module):
60
- def __init__(self, in_channels, out_channels, deconv):
61
- super(UNet1, self).__init__()
62
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
63
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
64
- self.conv2 = UNetConv(64, 128, 64, se=True)
65
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
66
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
67
-
68
- if deconv:
69
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
70
- else:
71
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
72
-
73
- for m in self.modules():
74
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
75
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
76
- elif isinstance(m, nn.Linear):
77
- nn.init.normal_(m.weight, 0, 0.01)
78
- if m.bias is not None:
79
- nn.init.constant_(m.bias, 0)
80
-
81
- def forward(self, x):
82
- x1 = self.conv1(x)
83
- x2 = self.conv1_down(x1)
84
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
85
- x2 = self.conv2(x2)
86
- x2 = self.conv2_up(x2)
87
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
88
-
89
- x1 = F.pad(x1, (-4, -4, -4, -4))
90
- x3 = self.conv3(x1 + x2)
91
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
92
- z = self.conv_bottom(x3)
93
- return z
94
-
95
- def forward_a(self, x):
96
- x1 = self.conv1(x)
97
- x2 = self.conv1_down(x1)
98
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
99
- x2 = self.conv2.conv(x2)
100
- return x1, x2
101
-
102
- def forward_b(self, x1, x2):
103
- x2 = self.conv2_up(x2)
104
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
105
-
106
- x1 = F.pad(x1, (-4, -4, -4, -4))
107
- x3 = self.conv3(x1 + x2)
108
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
109
- z = self.conv_bottom(x3)
110
- return z
111
-
112
-
113
- class UNet1x3(nn.Module):
114
- def __init__(self, in_channels, out_channels, deconv):
115
- super(UNet1x3, self).__init__()
116
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
117
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
118
- self.conv2 = UNetConv(64, 128, 64, se=True)
119
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
120
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
121
-
122
- if deconv:
123
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
124
- else:
125
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
126
-
127
- for m in self.modules():
128
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
129
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
130
- elif isinstance(m, nn.Linear):
131
- nn.init.normal_(m.weight, 0, 0.01)
132
- if m.bias is not None:
133
- nn.init.constant_(m.bias, 0)
134
-
135
- def forward(self, x):
136
- x1 = self.conv1(x)
137
- x2 = self.conv1_down(x1)
138
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
139
- x2 = self.conv2(x2)
140
- x2 = self.conv2_up(x2)
141
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
142
-
143
- x1 = F.pad(x1, (-4, -4, -4, -4))
144
- x3 = self.conv3(x1 + x2)
145
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
146
- z = self.conv_bottom(x3)
147
- return z
148
-
149
- def forward_a(self, x):
150
- x1 = self.conv1(x)
151
- x2 = self.conv1_down(x1)
152
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
153
- x2 = self.conv2.conv(x2)
154
- return x1, x2
155
-
156
- def forward_b(self, x1, x2):
157
- x2 = self.conv2_up(x2)
158
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
159
-
160
- x1 = F.pad(x1, (-4, -4, -4, -4))
161
- x3 = self.conv3(x1 + x2)
162
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
163
- z = self.conv_bottom(x3)
164
- return z
165
-
166
-
167
- class UNet2(nn.Module):
168
- def __init__(self, in_channels, out_channels, deconv):
169
- super(UNet2, self).__init__()
170
-
171
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
172
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
173
- self.conv2 = UNetConv(64, 64, 128, se=True)
174
- self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
175
- self.conv3 = UNetConv(128, 256, 128, se=True)
176
- self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
177
- self.conv4 = UNetConv(128, 64, 64, se=True)
178
- self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
179
- self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
180
-
181
- if deconv:
182
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
183
- else:
184
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
185
-
186
- for m in self.modules():
187
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
188
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
189
- elif isinstance(m, nn.Linear):
190
- nn.init.normal_(m.weight, 0, 0.01)
191
- if m.bias is not None:
192
- nn.init.constant_(m.bias, 0)
193
-
194
- def forward(self, x):
195
- x1 = self.conv1(x)
196
- x2 = self.conv1_down(x1)
197
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
198
- x2 = self.conv2(x2)
199
-
200
- x3 = self.conv2_down(x2)
201
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
202
- x3 = self.conv3(x3)
203
- x3 = self.conv3_up(x3)
204
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
205
-
206
- x2 = F.pad(x2, (-4, -4, -4, -4))
207
- x4 = self.conv4(x2 + x3)
208
- x4 = self.conv4_up(x4)
209
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
210
-
211
- x1 = F.pad(x1, (-16, -16, -16, -16))
212
- x5 = self.conv5(x1 + x4)
213
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
214
-
215
- z = self.conv_bottom(x5)
216
- return z
217
-
218
- def forward_a(self, x): # conv234结尾有se
219
- x1 = self.conv1(x)
220
- x2 = self.conv1_down(x1)
221
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
222
- x2 = self.conv2.conv(x2)
223
- return x1, x2
224
-
225
- def forward_b(self, x2): # conv234结尾有se
226
- x3 = self.conv2_down(x2)
227
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
228
- x3 = self.conv3.conv(x3)
229
- return x3
230
-
231
- def forward_c(self, x2, x3): # conv234结尾有se
232
- x3 = self.conv3_up(x3)
233
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
234
-
235
- x2 = F.pad(x2, (-4, -4, -4, -4))
236
- x4 = self.conv4.conv(x2 + x3)
237
- return x4
238
-
239
- def forward_d(self, x1, x4): # conv234结尾有se
240
- x4 = self.conv4_up(x4)
241
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
242
-
243
- x1 = F.pad(x1, (-16, -16, -16, -16))
244
- x5 = self.conv5(x1 + x4)
245
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
246
-
247
- z = self.conv_bottom(x5)
248
- return z
249
-
250
-
251
- class UpCunet2x(nn.Module): # 完美tile,全程无损
252
- def __init__(self, in_channels=3, out_channels=3):
253
- super(UpCunet2x, self).__init__()
254
- self.unet1 = UNet1(in_channels, out_channels, deconv=True)
255
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
256
-
257
- def forward(self, x, tile_mode): # 1.7G
258
- n, c, h0, w0 = x.shape
259
- if (tile_mode == 0): # 不tile
260
- ph = ((h0 - 1) // 2 + 1) * 2
261
- pw = ((w0 - 1) // 2 + 1) * 2
262
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除
263
- x = self.unet1.forward(x)
264
- x0 = self.unet2.forward(x)
265
- x1 = F.pad(x, (-20, -20, -20, -20))
266
- x = torch.add(x0, x1)
267
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
268
- return x
269
- elif (tile_mode == 1): # 对长边减半
270
- if (w0 >= h0):
271
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
272
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除
273
- else:
274
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
275
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除
276
- crop_size = (crop_size_h, crop_size_w) # 6.6G
277
- elif (tile_mode == 2): # hw都减半
278
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
279
- elif (tile_mode == 3): # hw都三分之一
280
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G
281
- elif (tile_mode == 4): # hw都四分���一
282
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
283
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
284
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
285
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
286
- n, c, h, w = x.shape
287
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
288
- if ("Half" in x.type()):
289
- se_mean0 = se_mean0.half()
290
- n_patch = 0
291
- tmp_dict = {}
292
- opt_res_dict = {}
293
- for i in range(0, h - 36, crop_size[0]):
294
- tmp_dict[i] = {}
295
- for j in range(0, w - 36, crop_size[1]):
296
- x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
297
- n, c1, h1, w1 = x_crop.shape
298
- tmp0, x_crop = self.unet1.forward_a(x_crop)
299
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
300
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
301
- else:
302
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
303
- se_mean0 += tmp_se_mean
304
- n_patch += 1
305
- tmp_dict[i][j] = (tmp0, x_crop)
306
- se_mean0 /= n_patch
307
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
308
- if ("Half" in x.type()):
309
- se_mean1 = se_mean1.half()
310
- for i in range(0, h - 36, crop_size[0]):
311
- for j in range(0, w - 36, crop_size[1]):
312
- tmp0, x_crop = tmp_dict[i][j]
313
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
314
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
315
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
316
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
317
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
318
- else:
319
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
320
- se_mean1 += tmp_se_mean
321
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
322
- se_mean1 /= n_patch
323
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
324
- if ("Half" in x.type()):
325
- se_mean0 = se_mean0.half()
326
- for i in range(0, h - 36, crop_size[0]):
327
- for j in range(0, w - 36, crop_size[1]):
328
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
329
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
330
- tmp_x3 = self.unet2.forward_b(tmp_x2)
331
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
332
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
333
- else:
334
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
335
- se_mean0 += tmp_se_mean
336
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
337
- se_mean0 /= n_patch
338
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
339
- if ("Half" in x.type()):
340
- se_mean1 = se_mean1.half()
341
- for i in range(0, h - 36, crop_size[0]):
342
- for j in range(0, w - 36, crop_size[1]):
343
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
344
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
345
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
346
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
347
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
348
- else:
349
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
350
- se_mean1 += tmp_se_mean
351
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
352
- se_mean1 /= n_patch
353
- for i in range(0, h - 36, crop_size[0]):
354
- opt_res_dict[i] = {}
355
- for j in range(0, w - 36, crop_size[1]):
356
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
357
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
358
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
359
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
360
- x_crop = torch.add(x0, x1) # x0是unet2的最终输出
361
- opt_res_dict[i][j] = x_crop
362
- del tmp_dict
363
- torch.cuda.empty_cache()
364
- res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
365
- if ("Half" in x.type()):
366
- res = res.half()
367
- for i in range(0, h - 36, crop_size[0]):
368
- for j in range(0, w - 36, crop_size[1]):
369
- res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
370
- del opt_res_dict
371
- torch.cuda.empty_cache()
372
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
373
- return res #
374
-
375
-
376
- class UpCunet3x(nn.Module): # 完美tile,全程无损
377
- def __init__(self, in_channels=3, out_channels=3):
378
- super(UpCunet3x, self).__init__()
379
- self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
380
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
381
-
382
- def forward(self, x, tile_mode): # 1.7G
383
- n, c, h0, w0 = x.shape
384
- if (tile_mode == 0): # 不tile
385
- ph = ((h0 - 1) // 4 + 1) * 4
386
- pw = ((w0 - 1) // 4 + 1) * 4
387
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除
388
- x = self.unet1.forward(x)
389
- x0 = self.unet2.forward(x)
390
- x1 = F.pad(x, (-20, -20, -20, -20))
391
- x = torch.add(x0, x1)
392
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
393
- return x
394
- elif (tile_mode == 1): # 对长边减半
395
- if (w0 >= h0):
396
- crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除
397
- crop_size_h = (h0 - 1) // 4 * 4 + 4 # 能被4整除
398
- else:
399
- crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除
400
- crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除
401
- crop_size = (crop_size_h, crop_size_w) # 6.6G
402
- elif (tile_mode == 2): # hw都减半
403
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G
404
- elif (tile_mode == 3): # hw都三分之一
405
- crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G
406
- elif (tile_mode == 4): # hw都四分之一
407
- crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G
408
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
409
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
410
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
411
- n, c, h, w = x.shape
412
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
413
- if ("Half" in x.type()):
414
- se_mean0 = se_mean0.half()
415
- n_patch = 0
416
- tmp_dict = {}
417
- opt_res_dict = {}
418
- for i in range(0, h - 28, crop_size[0]):
419
- tmp_dict[i] = {}
420
- for j in range(0, w - 28, crop_size[1]):
421
- x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
422
- n, c1, h1, w1 = x_crop.shape
423
- tmp0, x_crop = self.unet1.forward_a(x_crop)
424
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
425
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
426
- else:
427
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
428
- se_mean0 += tmp_se_mean
429
- n_patch += 1
430
- tmp_dict[i][j] = (tmp0, x_crop)
431
- se_mean0 /= n_patch
432
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
433
- if ("Half" in x.type()):
434
- se_mean1 = se_mean1.half()
435
- for i in range(0, h - 28, crop_size[0]):
436
- for j in range(0, w - 28, crop_size[1]):
437
- tmp0, x_crop = tmp_dict[i][j]
438
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
439
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
440
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
441
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
442
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
443
- else:
444
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
445
- se_mean1 += tmp_se_mean
446
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
447
- se_mean1 /= n_patch
448
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
449
- if ("Half" in x.type()):
450
- se_mean0 = se_mean0.half()
451
- for i in range(0, h - 28, crop_size[0]):
452
- for j in range(0, w - 28, crop_size[1]):
453
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
454
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
455
- tmp_x3 = self.unet2.forward_b(tmp_x2)
456
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
457
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
458
- else:
459
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
460
- se_mean0 += tmp_se_mean
461
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
462
- se_mean0 /= n_patch
463
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
464
- if ("Half" in x.type()):
465
- se_mean1 = se_mean1.half()
466
- for i in range(0, h - 28, crop_size[0]):
467
- for j in range(0, w - 28, crop_size[1]):
468
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
469
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
470
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
471
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
472
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
473
- else:
474
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
475
- se_mean1 += tmp_se_mean
476
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
477
- se_mean1 /= n_patch
478
- for i in range(0, h - 28, crop_size[0]):
479
- opt_res_dict[i] = {}
480
- for j in range(0, w - 28, crop_size[1]):
481
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
482
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
483
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
484
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
485
- x_crop = torch.add(x0, x1) # x0是unet2的最终输出
486
- opt_res_dict[i][j] = x_crop #
487
- del tmp_dict
488
- torch.cuda.empty_cache()
489
- res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
490
- if ("Half" in x.type()):
491
- res = res.half()
492
- for i in range(0, h - 28, crop_size[0]):
493
- for j in range(0, w - 28, crop_size[1]):
494
- res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
495
- del opt_res_dict
496
- torch.cuda.empty_cache()
497
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
498
- return res
499
-
500
-
501
- class UpCunet4x(nn.Module): # 完美tile,全程无损
502
- def __init__(self, in_channels=3, out_channels=3):
503
- super(UpCunet4x, self).__init__()
504
- self.unet1 = UNet1(in_channels, 64, deconv=True)
505
- self.unet2 = UNet2(64, 64, deconv=False)
506
- self.ps = nn.PixelShuffle(2)
507
- self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
508
-
509
- def forward(self, x, tile_mode):
510
- n, c, h0, w0 = x.shape
511
- x00 = x
512
- if (tile_mode == 0): # 不tile
513
- ph = ((h0 - 1) // 2 + 1) * 2
514
- pw = ((w0 - 1) // 2 + 1) * 2
515
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除
516
- x = self.unet1.forward(x)
517
- x0 = self.unet2.forward(x)
518
- x1 = F.pad(x, (-20, -20, -20, -20))
519
- x = torch.add(x0, x1)
520
- x = self.conv_final(x)
521
- x = F.pad(x, (-1, -1, -1, -1))
522
- x = self.ps(x)
523
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
524
- x += F.interpolate(x00, scale_factor=4, mode='nearest')
525
- return x
526
- elif (tile_mode == 1): # 对长边减半
527
- if (w0 >= h0):
528
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
529
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除
530
- else:
531
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
532
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除
533
- crop_size = (crop_size_h, crop_size_w) # 6.6G
534
- elif (tile_mode == 2): # hw都减半
535
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
536
- elif (tile_mode == 3): # hw都三分之一
537
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G
538
- elif (tile_mode == 4): # hw都四分之一
539
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
540
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
541
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
542
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
543
- n, c, h, w = x.shape
544
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
545
- if ("Half" in x.type()):
546
- se_mean0 = se_mean0.half()
547
- n_patch = 0
548
- tmp_dict = {}
549
- opt_res_dict = {}
550
- for i in range(0, h - 38, crop_size[0]):
551
- tmp_dict[i] = {}
552
- for j in range(0, w - 38, crop_size[1]):
553
- x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
554
- n, c1, h1, w1 = x_crop.shape
555
- tmp0, x_crop = self.unet1.forward_a(x_crop)
556
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
557
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
558
- else:
559
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
560
- se_mean0 += tmp_se_mean
561
- n_patch += 1
562
- tmp_dict[i][j] = (tmp0, x_crop)
563
- se_mean0 /= n_patch
564
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
565
- if ("Half" in x.type()):
566
- se_mean1 = se_mean1.half()
567
- for i in range(0, h - 38, crop_size[0]):
568
- for j in range(0, w - 38, crop_size[1]):
569
- tmp0, x_crop = tmp_dict[i][j]
570
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
571
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
572
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
573
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
574
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
575
- else:
576
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
577
- se_mean1 += tmp_se_mean
578
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
579
- se_mean1 /= n_patch
580
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
581
- if ("Half" in x.type()):
582
- se_mean0 = se_mean0.half()
583
- for i in range(0, h - 38, crop_size[0]):
584
- for j in range(0, w - 38, crop_size[1]):
585
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
586
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
587
- tmp_x3 = self.unet2.forward_b(tmp_x2)
588
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
589
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
590
- else:
591
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
592
- se_mean0 += tmp_se_mean
593
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
594
- se_mean0 /= n_patch
595
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
596
- if ("Half" in x.type()):
597
- se_mean1 = se_mean1.half()
598
- for i in range(0, h - 38, crop_size[0]):
599
- for j in range(0, w - 38, crop_size[1]):
600
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
601
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
602
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
603
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
604
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
605
- else:
606
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
607
- se_mean1 += tmp_se_mean
608
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
609
- se_mean1 /= n_patch
610
- for i in range(0, h - 38, crop_size[0]):
611
- opt_res_dict[i] = {}
612
- for j in range(0, w - 38, crop_size[1]):
613
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
614
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
615
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
616
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
617
- x_crop = torch.add(x0, x1) # x0是unet2的最终输出
618
- x_crop = self.conv_final(x_crop)
619
- x_crop = F.pad(x_crop, (-1, -1, -1, -1))
620
- x_crop = self.ps(x_crop)
621
- opt_res_dict[i][j] = x_crop
622
- del tmp_dict
623
- torch.cuda.empty_cache()
624
- res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
625
- if ("Half" in x.type()):
626
- res = res.half()
627
- for i in range(0, h - 38, crop_size[0]):
628
- for j in range(0, w - 38, crop_size[1]):
629
- # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
630
- res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
631
- del opt_res_dict
632
- torch.cuda.empty_cache()
633
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
634
- res += F.interpolate(x00, scale_factor=4, mode='nearest')
635
- return res #
636
-
637
-
638
- class RealWaifuUpScaler(object):
639
- def __init__(self, scale, weight_path, half, device):
640
- weight = torch.load(weight_path, map_location="cpu")
641
- self.model = eval("UpCunet%sx" % scale)()
642
- if (half == True):
643
- self.model = self.model.half().to(device)
644
- else:
645
- self.model = self.model.to(device)
646
- self.model.load_state_dict(weight, strict=True)
647
- self.model.eval()
648
- self.half = half
649
- self.device = device
650
-
651
- def np2tensor(self, np_frame):
652
- if (self.half == False):
653
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
654
- else:
655
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
656
-
657
- def tensor2np(self, tensor):
658
- if (self.half == False):
659
- return (
660
- np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
661
- else:
662
- return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
663
- (1, 2, 0)))
664
-
665
- def __call__(self, frame, tile_mode):
666
- with torch.no_grad():
667
- tensor = self.np2tensor(frame)
668
- result = self.tensor2np(self.model(tensor, tile_mode))
669
- return result
670
-
671
-
672
- if __name__ == "__main__":
673
- ###########inference_img
674
- import time, cv2, sys
675
- from time import time as ttime
676
-
677
- for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
678
- ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
679
- for tile_mode in [0, 1, 2, 3, 4]:
680
- upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
681
- input_dir = "%s/input_dir1" % root_path
682
- output_dir = "%s/opt-dir-all-test" % root_path
683
- os.makedirs(output_dir, exist_ok=True)
684
- for name in os.listdir(input_dir):
685
- print(name)
686
- tmp = name.split(".")
687
- inp_path = os.path.join(input_dir, name)
688
- suffix = tmp[-1]
689
- prefix = ".".join(tmp[:-1])
690
- tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
691
- print(inp_path, tmp_path)
692
- # 支持中文路径
693
- # os.link(inp_path, tmp_path)#win用硬链接
694
- os.symlink(inp_path, tmp_path) # linux用软链接
695
- frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
696
- t0 = ttime()
697
- result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
698
- t1 = ttime()
699
- print(prefix, "done", t1 - t0)
700
- tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
701
- cv2.imwrite(tmp_opt_path, result)
702
- n = 0
703
- while (1):
704
- if (n == 0):
705
- suffix = "_%sx_tile%s.png" % (scale, tile_mode)
706
- else:
707
- suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) #
708
- if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
709
- break
710
- else:
711
- n += 1
712
- final_opt_path = os.path.join(output_dir, prefix + suffix)
713
- os.rename(tmp_opt_path, final_opt_path)
714
- os.remove(tmp_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BertChristiaens/youtube-dl/app.py DELETED
@@ -1,72 +0,0 @@
1
- """This is the main module of the streamlit app that allows the user to download youtube videos as mp3 files."""
2
- import streamlit as st
3
- from yt_dlp import YoutubeDL
4
- import os
5
- from io import BytesIO
6
- from datetime import datetime
7
-
8
- URLS = ['https://www.youtube.com/watch?v=BaW_jenozKc']
9
-
10
-
11
- ydl_opts = {
12
- 'format': 'bestaudio/best',
13
- 'postprocessors': [{
14
- 'key': 'FFmpegExtractAudio',
15
- 'preferredcodec': 'mp3',
16
- 'preferredquality': '192',
17
- }],
18
- 'outtmpl': 'audio'
19
- }
20
-
21
- def download_video(url):
22
- with YoutubeDL(ydl_opts) as ydl:
23
- print(url)
24
- error_code = ydl.download([url])
25
- info = ydl.extract_info(url, download=False)
26
- print(error_code)
27
- return error_code, info
28
-
29
- def clean_files():
30
- if os.path.isfile('audio'):
31
- os.remove('audio')
32
- if os.path.isfile('audio.mp3'):
33
- os.remove('audio.mp3')
34
-
35
-
36
- def main():
37
- """This method has a text input field, radio button and a button for downloading the video as mp3."""
38
- st.title('Youtube to mp3')
39
- st.write('Enter the url of the youtube video you want to download')
40
- url = st.text_input('URL')
41
-
42
- if st.button('Download video'):
43
- with st.spinner('Downloading video'):
44
- clean_files()
45
-
46
- error_code, info = download_video(url)
47
-
48
- st.session_state['latest_video'] = url
49
- st.session_state['latest_title'] = info['fulltitle']
50
-
51
- if error_code:
52
- st.error('Error downloading video')
53
- else:
54
- st.success('Downloaded video')
55
-
56
- if os.path.isfile('audio.mp3') and st.session_state.get('latest_video'):
57
- video_url = st.session_state.get('latest_video', '/')
58
- video_title = st.session_state.get('latest_title', '/')
59
-
60
- st.write(f"Last downloaded video is: {video_title} with url {video_url}")
61
- st.audio('audio.mp3')
62
- buffer = BytesIO()
63
- with open('audio.mp3', 'rb') as f:
64
- buffer.write(f.read())
65
- timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
66
- st.download_button(label='Download mp3',
67
- data=buffer.getvalue(),
68
- file_name=f"{video_title.replace(' ', '-')}.mp3",
69
- mime="audio/mp3")
70
-
71
- if __name__ == '__main__':
72
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat/src/lib/shareConversation.ts DELETED
@@ -1,27 +0,0 @@
1
- import { base } from "$app/paths";
2
- import { ERROR_MESSAGES, error } from "$lib/stores/errors";
3
- import { share } from "./utils/share";
4
-
5
- export async function shareConversation(id: string, title: string) {
6
- try {
7
- const res = await fetch(`${base}/conversation/${id}/share`, {
8
- method: "POST",
9
- headers: {
10
- "Content-Type": "application/json",
11
- },
12
- });
13
-
14
- if (!res.ok) {
15
- error.set("Error while sharing conversation, try again.");
16
- console.error("Error while sharing conversation: " + (await res.text()));
17
- return;
18
- }
19
-
20
- const { url } = await res.json();
21
-
22
- share(url, title);
23
- } catch (err) {
24
- error.set(ERROR_MESSAGES.default);
25
- console.error(err);
26
- }
27
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_wrap.py DELETED
@@ -1,56 +0,0 @@
1
- import re
2
- from typing import Iterable, List, Tuple
3
-
4
- from ._loop import loop_last
5
- from .cells import cell_len, chop_cells
6
-
7
- re_word = re.compile(r"\s*\S+\s*")
8
-
9
-
10
- def words(text: str) -> Iterable[Tuple[int, int, str]]:
11
- position = 0
12
- word_match = re_word.match(text, position)
13
- while word_match is not None:
14
- start, end = word_match.span()
15
- word = word_match.group(0)
16
- yield start, end, word
17
- word_match = re_word.match(text, end)
18
-
19
-
20
- def divide_line(text: str, width: int, fold: bool = True) -> List[int]:
21
- divides: List[int] = []
22
- append = divides.append
23
- line_position = 0
24
- _cell_len = cell_len
25
- for start, _end, word in words(text):
26
- word_length = _cell_len(word.rstrip())
27
- if line_position + word_length > width:
28
- if word_length > width:
29
- if fold:
30
- chopped_words = chop_cells(word, max_size=width, position=0)
31
- for last, line in loop_last(chopped_words):
32
- if start:
33
- append(start)
34
-
35
- if last:
36
- line_position = _cell_len(line)
37
- else:
38
- start += len(line)
39
- else:
40
- if start:
41
- append(start)
42
- line_position = _cell_len(word)
43
- elif line_position and start:
44
- append(start)
45
- line_position = _cell_len(word)
46
- else:
47
- line_position += _cell_len(word)
48
- return divides
49
-
50
-
51
- if __name__ == "__main__": # pragma: no cover
52
- from .console import Console
53
-
54
- console = Console(width=10)
55
- console.print("12345 abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ 12345")
56
- print(chop_cells("abcdefghijklmnopqrstuvwxyz", 10, position=2))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/bar.py DELETED
@@ -1,94 +0,0 @@
1
- from typing import Optional, Union
2
-
3
- from .color import Color
4
- from .console import Console, ConsoleOptions, RenderResult
5
- from .jupyter import JupyterMixin
6
- from .measure import Measurement
7
- from .segment import Segment
8
- from .style import Style
9
-
10
- # There are left-aligned characters for 1/8 to 7/8, but
11
- # the right-aligned characters exist only for 1/8 and 4/8.
12
- BEGIN_BLOCK_ELEMENTS = ["█", "█", "█", "▐", "▐", "▐", "▕", "▕"]
13
- END_BLOCK_ELEMENTS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉"]
14
- FULL_BLOCK = "█"
15
-
16
-
17
- class Bar(JupyterMixin):
18
- """Renders a solid block bar.
19
-
20
- Args:
21
- size (float): Value for the end of the bar.
22
- begin (float): Begin point (between 0 and size, inclusive).
23
- end (float): End point (between 0 and size, inclusive).
24
- width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
25
- color (Union[Color, str], optional): Color of the bar. Defaults to "default".
26
- bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default".
27
- """
28
-
29
- def __init__(
30
- self,
31
- size: float,
32
- begin: float,
33
- end: float,
34
- *,
35
- width: Optional[int] = None,
36
- color: Union[Color, str] = "default",
37
- bgcolor: Union[Color, str] = "default",
38
- ):
39
- self.size = size
40
- self.begin = max(begin, 0)
41
- self.end = min(end, size)
42
- self.width = width
43
- self.style = Style(color=color, bgcolor=bgcolor)
44
-
45
- def __repr__(self) -> str:
46
- return f"Bar({self.size}, {self.begin}, {self.end})"
47
-
48
- def __rich_console__(
49
- self, console: Console, options: ConsoleOptions
50
- ) -> RenderResult:
51
-
52
- width = min(
53
- self.width if self.width is not None else options.max_width,
54
- options.max_width,
55
- )
56
-
57
- if self.begin >= self.end:
58
- yield Segment(" " * width, self.style)
59
- yield Segment.line()
60
- return
61
-
62
- prefix_complete_eights = int(width * 8 * self.begin / self.size)
63
- prefix_bar_count = prefix_complete_eights // 8
64
- prefix_eights_count = prefix_complete_eights % 8
65
-
66
- body_complete_eights = int(width * 8 * self.end / self.size)
67
- body_bar_count = body_complete_eights // 8
68
- body_eights_count = body_complete_eights % 8
69
-
70
- # When start and end fall into the same cell, we ideally should render
71
- # a symbol that's "center-aligned", but there is no good symbol in Unicode.
72
- # In this case, we fall back to right-aligned block symbol for simplicity.
73
-
74
- prefix = " " * prefix_bar_count
75
- if prefix_eights_count:
76
- prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count]
77
-
78
- body = FULL_BLOCK * body_bar_count
79
- if body_eights_count:
80
- body += END_BLOCK_ELEMENTS[body_eights_count]
81
-
82
- suffix = " " * (width - len(body))
83
-
84
- yield Segment(prefix + body[len(prefix) :] + suffix, self.style)
85
- yield Segment.line()
86
-
87
- def __rich_measure__(
88
- self, console: Console, options: ConsoleOptions
89
- ) -> Measurement:
90
- return (
91
- Measurement(self.width, self.width)
92
- if self.width is not None
93
- else Measurement(4, options.max_width)
94
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/exceptions.py DELETED
@@ -1,323 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
4
-
5
- # Base Exceptions
6
-
7
-
8
- class HTTPError(Exception):
9
- """Base exception used by this module."""
10
-
11
- pass
12
-
13
-
14
- class HTTPWarning(Warning):
15
- """Base warning used by this module."""
16
-
17
- pass
18
-
19
-
20
- class PoolError(HTTPError):
21
- """Base exception for errors caused within a pool."""
22
-
23
- def __init__(self, pool, message):
24
- self.pool = pool
25
- HTTPError.__init__(self, "%s: %s" % (pool, message))
26
-
27
- def __reduce__(self):
28
- # For pickling purposes.
29
- return self.__class__, (None, None)
30
-
31
-
32
- class RequestError(PoolError):
33
- """Base exception for PoolErrors that have associated URLs."""
34
-
35
- def __init__(self, pool, url, message):
36
- self.url = url
37
- PoolError.__init__(self, pool, message)
38
-
39
- def __reduce__(self):
40
- # For pickling purposes.
41
- return self.__class__, (None, self.url, None)
42
-
43
-
44
- class SSLError(HTTPError):
45
- """Raised when SSL certificate fails in an HTTPS connection."""
46
-
47
- pass
48
-
49
-
50
- class ProxyError(HTTPError):
51
- """Raised when the connection to a proxy fails."""
52
-
53
- def __init__(self, message, error, *args):
54
- super(ProxyError, self).__init__(message, error, *args)
55
- self.original_error = error
56
-
57
-
58
- class DecodeError(HTTPError):
59
- """Raised when automatic decoding based on Content-Type fails."""
60
-
61
- pass
62
-
63
-
64
- class ProtocolError(HTTPError):
65
- """Raised when something unexpected happens mid-request/response."""
66
-
67
- pass
68
-
69
-
70
- #: Renamed to ProtocolError but aliased for backwards compatibility.
71
- ConnectionError = ProtocolError
72
-
73
-
74
- # Leaf Exceptions
75
-
76
-
77
- class MaxRetryError(RequestError):
78
- """Raised when the maximum number of retries is exceeded.
79
-
80
- :param pool: The connection pool
81
- :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
82
- :param string url: The requested Url
83
- :param exceptions.Exception reason: The underlying error
84
-
85
- """
86
-
87
- def __init__(self, pool, url, reason=None):
88
- self.reason = reason
89
-
90
- message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
91
-
92
- RequestError.__init__(self, pool, url, message)
93
-
94
-
95
- class HostChangedError(RequestError):
96
- """Raised when an existing pool gets a request for a foreign host."""
97
-
98
- def __init__(self, pool, url, retries=3):
99
- message = "Tried to open a foreign host with url: %s" % url
100
- RequestError.__init__(self, pool, url, message)
101
- self.retries = retries
102
-
103
-
104
- class TimeoutStateError(HTTPError):
105
- """Raised when passing an invalid state to a timeout"""
106
-
107
- pass
108
-
109
-
110
- class TimeoutError(HTTPError):
111
- """Raised when a socket timeout error occurs.
112
-
113
- Catching this error will catch both :exc:`ReadTimeoutErrors
114
- <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
115
- """
116
-
117
- pass
118
-
119
-
120
- class ReadTimeoutError(TimeoutError, RequestError):
121
- """Raised when a socket timeout occurs while receiving data from a server"""
122
-
123
- pass
124
-
125
-
126
- # This timeout error does not have a URL attached and needs to inherit from the
127
- # base HTTPError
128
- class ConnectTimeoutError(TimeoutError):
129
- """Raised when a socket timeout occurs while connecting to a server"""
130
-
131
- pass
132
-
133
-
134
- class NewConnectionError(ConnectTimeoutError, PoolError):
135
- """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
136
-
137
- pass
138
-
139
-
140
- class EmptyPoolError(PoolError):
141
- """Raised when a pool runs out of connections and no more are allowed."""
142
-
143
- pass
144
-
145
-
146
- class ClosedPoolError(PoolError):
147
- """Raised when a request enters a pool after the pool has been closed."""
148
-
149
- pass
150
-
151
-
152
- class LocationValueError(ValueError, HTTPError):
153
- """Raised when there is something wrong with a given URL input."""
154
-
155
- pass
156
-
157
-
158
- class LocationParseError(LocationValueError):
159
- """Raised when get_host or similar fails to parse the URL input."""
160
-
161
- def __init__(self, location):
162
- message = "Failed to parse: %s" % location
163
- HTTPError.__init__(self, message)
164
-
165
- self.location = location
166
-
167
-
168
- class URLSchemeUnknown(LocationValueError):
169
- """Raised when a URL input has an unsupported scheme."""
170
-
171
- def __init__(self, scheme):
172
- message = "Not supported URL scheme %s" % scheme
173
- super(URLSchemeUnknown, self).__init__(message)
174
-
175
- self.scheme = scheme
176
-
177
-
178
- class ResponseError(HTTPError):
179
- """Used as a container for an error reason supplied in a MaxRetryError."""
180
-
181
- GENERIC_ERROR = "too many error responses"
182
- SPECIFIC_ERROR = "too many {status_code} error responses"
183
-
184
-
185
- class SecurityWarning(HTTPWarning):
186
- """Warned when performing security reducing actions"""
187
-
188
- pass
189
-
190
-
191
- class SubjectAltNameWarning(SecurityWarning):
192
- """Warned when connecting to a host with a certificate missing a SAN."""
193
-
194
- pass
195
-
196
-
197
- class InsecureRequestWarning(SecurityWarning):
198
- """Warned when making an unverified HTTPS request."""
199
-
200
- pass
201
-
202
-
203
- class SystemTimeWarning(SecurityWarning):
204
- """Warned when system time is suspected to be wrong"""
205
-
206
- pass
207
-
208
-
209
- class InsecurePlatformWarning(SecurityWarning):
210
- """Warned when certain TLS/SSL configuration is not available on a platform."""
211
-
212
- pass
213
-
214
-
215
- class SNIMissingWarning(HTTPWarning):
216
- """Warned when making a HTTPS request without SNI available."""
217
-
218
- pass
219
-
220
-
221
- class DependencyWarning(HTTPWarning):
222
- """
223
- Warned when an attempt is made to import a module with missing optional
224
- dependencies.
225
- """
226
-
227
- pass
228
-
229
-
230
- class ResponseNotChunked(ProtocolError, ValueError):
231
- """Response needs to be chunked in order to read it as chunks."""
232
-
233
- pass
234
-
235
-
236
- class BodyNotHttplibCompatible(HTTPError):
237
- """
238
- Body should be :class:`http.client.HTTPResponse` like
239
- (have an fp attribute which returns raw chunks) for read_chunked().
240
- """
241
-
242
- pass
243
-
244
-
245
- class IncompleteRead(HTTPError, httplib_IncompleteRead):
246
- """
247
- Response length doesn't match expected Content-Length
248
-
249
- Subclass of :class:`http.client.IncompleteRead` to allow int value
250
- for ``partial`` to avoid creating large objects on streamed reads.
251
- """
252
-
253
- def __init__(self, partial, expected):
254
- super(IncompleteRead, self).__init__(partial, expected)
255
-
256
- def __repr__(self):
257
- return "IncompleteRead(%i bytes read, %i more expected)" % (
258
- self.partial,
259
- self.expected,
260
- )
261
-
262
-
263
- class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
264
- """Invalid chunk length in a chunked response."""
265
-
266
- def __init__(self, response, length):
267
- super(InvalidChunkLength, self).__init__(
268
- response.tell(), response.length_remaining
269
- )
270
- self.response = response
271
- self.length = length
272
-
273
- def __repr__(self):
274
- return "InvalidChunkLength(got length %r, %i bytes read)" % (
275
- self.length,
276
- self.partial,
277
- )
278
-
279
-
280
- class InvalidHeader(HTTPError):
281
- """The header provided was somehow invalid."""
282
-
283
- pass
284
-
285
-
286
- class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
287
- """ProxyManager does not support the supplied scheme"""
288
-
289
- # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
290
-
291
- def __init__(self, scheme):
292
- # 'localhost' is here because our URL parser parses
293
- # localhost:8080 -> scheme=localhost, remove if we fix this.
294
- if scheme == "localhost":
295
- scheme = None
296
- if scheme is None:
297
- message = "Proxy URL had no scheme, should start with http:// or https://"
298
- else:
299
- message = (
300
- "Proxy URL had unsupported scheme %s, should use http:// or https://"
301
- % scheme
302
- )
303
- super(ProxySchemeUnknown, self).__init__(message)
304
-
305
-
306
- class ProxySchemeUnsupported(ValueError):
307
- """Fetching HTTPS resources through HTTPS proxies is unsupported"""
308
-
309
- pass
310
-
311
-
312
- class HeaderParsingError(HTTPError):
313
- """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
314
-
315
- def __init__(self, defects, unparsed_data):
316
- message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
317
- super(HeaderParsingError, self).__init__(message)
318
-
319
-
320
- class UnrewindableBodyError(HTTPError):
321
- """urllib3 encountered an error when trying to rewind a body"""
322
-
323
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Boadiwaa/Recipes/openai/api_resources/fine_tune.py DELETED
@@ -1,87 +0,0 @@
1
- from urllib.parse import quote_plus
2
-
3
- from openai import api_requestor, util, error
4
- from openai.api_resources.abstract import (
5
- CreateableAPIResource,
6
- ListableAPIResource,
7
- nested_resource_class_methods,
8
- )
9
- from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
10
- from openai.openai_response import OpenAIResponse
11
- from openai.util import ApiType
12
-
13
-
14
- @nested_resource_class_methods("event", operations=["list"])
15
- class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
16
- OBJECT_NAME = "fine-tunes"
17
-
18
- @classmethod
19
- def cancel(
20
- cls,
21
- id,
22
- api_key=None,
23
- api_type=None,
24
- request_id=None,
25
- api_version=None,
26
- **params
27
- ):
28
- base = cls.class_url()
29
- extn = quote_plus(id)
30
-
31
- typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
32
- if typed_api_type == ApiType.AZURE:
33
- url = "/%s%s/%s/cancel?api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
34
- elif typed_api_type == ApiType.OPEN_AI:
35
- url = "%s/%s/cancel" % (base, extn)
36
- else:
37
- raise error.InvalidAPIType('Unsupported API type %s' % api_type)
38
-
39
- instance = cls(id, api_key, **params)
40
- return instance.request("post", url, request_id=request_id)
41
-
42
- @classmethod
43
- def stream_events(
44
- cls,
45
- id,
46
- api_key=None,
47
- api_base=None,
48
- api_type=None,
49
- request_id=None,
50
- api_version=None,
51
- organization=None,
52
- **params,
53
- ):
54
- base = cls.class_url()
55
- extn = quote_plus(id)
56
-
57
- requestor = api_requestor.APIRequestor(
58
- api_key,
59
- api_base=api_base,
60
- api_type=api_type,
61
- api_version=api_version,
62
- organization=organization,
63
- )
64
-
65
- typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
66
-
67
- if typed_api_type == ApiType.AZURE:
68
- url = "/%s%s/%s/events?stream=true&api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
69
- elif typed_api_type == ApiType.OPEN_AI:
70
- url = "%s/%s/events?stream=true" % (base, extn)
71
- else:
72
- raise error.InvalidAPIType('Unsupported API type %s' % api_type)
73
-
74
- response, _, api_key = requestor.request(
75
- "get", url, params, stream=True, request_id=request_id
76
- )
77
-
78
- assert not isinstance(response, OpenAIResponse) # must be an iterator
79
- return (
80
- util.convert_to_openai_object(
81
- line,
82
- api_key,
83
- api_version,
84
- organization,
85
- )
86
- for line in response
87
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BuBBLe1q/anything-v3.0/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Anything V3.0
3
- emoji: 🏃
4
- colorFrom: gray
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.10.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: akhaliq/anything-v3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/config/device_system.h DELETED
@@ -1,61 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- // reserve 0 for undefined
20
- #define THRUST_DEVICE_SYSTEM_CUDA 1
21
- #define THRUST_DEVICE_SYSTEM_OMP 2
22
- #define THRUST_DEVICE_SYSTEM_TBB 3
23
- #define THRUST_DEVICE_SYSTEM_CPP 4
24
-
25
- #ifndef THRUST_DEVICE_SYSTEM
26
- #define THRUST_DEVICE_SYSTEM THRUST_DEVICE_SYSTEM_CUDA
27
- #endif // THRUST_DEVICE_SYSTEM
28
-
29
- // XXX make the use of THRUST_DEVICE_BACKEND an error in Thrust 1.7
30
- // XXX eliminate the following in Thrust 1.7
31
-
32
- #define THRUST_DEVICE_BACKEND_CUDA THRUST_DEVICE_SYSTEM_CUDA
33
- #define THRUST_DEVICE_BACKEND_OMP THRUST_DEVICE_SYSTEM_OMP
34
- #define THRUST_DEVICE_BACKEND_TBB THRUST_DEVICE_SYSTEM_TBB
35
-
36
- #ifdef THRUST_DEVICE_BACKEND
37
- # if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
38
- # pragma message("----------------------------------------------------------------------------------")
39
- # pragma message("| WARNING: THRUST_DEVICE_BACKEND is deprecated; use THRUST_DEVICE_SYSTEM instead |")
40
- # pragma message("----------------------------------------------------------------------------------")
41
- # else
42
- # warning ----------------------------------------------------------------------------------
43
- # warning | WARNING: THRUST_DEVICE_BACKEND is deprecated; use THRUST_DEVICE_SYSTEM instead |
44
- # warning ----------------------------------------------------------------------------------
45
- # endif // THRUST_HOST_COMPILER
46
- # undef THRUST_DEVICE_SYSTEM
47
- # define THRUST_DEVICE_SYSTEM THRUST_DEVICE_BACKEND
48
- #endif // THRUST_DEVICE_BACKEND
49
-
50
- #if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
51
- #define __THRUST_DEVICE_SYSTEM_NAMESPACE cuda
52
- #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
53
- #define __THRUST_DEVICE_SYSTEM_NAMESPACE omp
54
- #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_TBB
55
- #define __THRUST_DEVICE_SYSTEM_NAMESPACE tbb
56
- #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CPP
57
- #define __THRUST_DEVICE_SYSTEM_NAMESPACE cpp
58
- #endif
59
-
60
- #define __THRUST_DEVICE_SYSTEM_ROOT thrust/system/__THRUST_DEVICE_SYSTEM_NAMESPACE
61
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/random/discard_block_engine.h DELETED
@@ -1,252 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file discard_block_engine.h
19
- * \brief A random number engine which adapts a base engine and produces
20
- * numbers by discarding all but a contiguous blocks of its values.
21
- */
22
-
23
- #pragma once
24
-
25
- #include <thrust/detail/config.h>
26
-
27
- #include <thrust/detail/config.h>
28
- #include <iostream>
29
- #include <thrust/detail/cstdint.h>
30
- #include <thrust/random/detail/random_core_access.h>
31
-
32
- namespace thrust
33
- {
34
-
35
- namespace random
36
- {
37
-
38
- /*! \addtogroup random_number_engine_adaptors Random Number Engine Adaptor Class Templates
39
- * \ingroup random
40
- * \{
41
- */
42
-
43
- /*! \class discard_block_engine
44
- * \brief A \p discard_block_engine adapts an existing base random number engine and produces
45
- * random values by discarding some of the values returned by its base engine.
46
- * Each cycle of the compound engine begins by returning \c r values successively produced
47
- * by the base engine and ends by discarding <tt>p-r</tt> such values. The engine's state
48
- * is the state of its base engine followed by the number of calls to <tt>operator()</tt>
49
- * that have occurred since the beginning of the current cycle.
50
- *
51
- * \tparam Engine The type of the base random number engine to adapt.
52
- * \tparam p The discard cycle length.
53
- * \tparam r The number of values to return of the base engine. Because <tt>p-r</tt> will be
54
- * discarded, <tt>r <= p</tt>.
55
- *
56
- * The following code snippet shows an example of using a \p discard_block_engine instance:
57
- *
58
- * \code
59
- * #include <thrust/random/linear_congruential_engine.h>
60
- * #include <thrust/random/discard_block_engine.h>
61
- * #include <iostream>
62
- *
63
- * int main(void)
64
- * {
65
- * // create a discard_block_engine from minstd_rand, with a cycle length of 13
66
- * // keep every first 10 values, and discard the next 3
67
- * thrust::discard_block_engine<thrust::minstd_rand, 13, 10> rng;
68
- *
69
- * // print a random number to standard output
70
- * std::cout << rng() << std::endl;
71
- *
72
- * return 0;
73
- * }
74
- * \endcode
75
- */
76
- template<typename Engine, size_t p, size_t r>
77
- class discard_block_engine
78
- {
79
- public:
80
- // types
81
-
82
- /*! \typedef base_type
83
- * \brief The type of the adapted base random number engine.
84
- */
85
- typedef Engine base_type;
86
-
87
- /*! \typedef result_type
88
- * \brief The type of the unsigned integer produced by this \p linear_congruential_engine.
89
- */
90
- typedef typename base_type::result_type result_type;
91
-
92
- // engine characteristics
93
-
94
- /*! The length of the production cycle.
95
- */
96
- static const size_t block_size = p;
97
-
98
- /*! The number of used numbers per production cycle.
99
- */
100
- static const size_t used_block = r;
101
-
102
- /*! The smallest value this \p discard_block_engine may potentially produce.
103
- */
104
- static const result_type min = base_type::min;
105
-
106
- /*! The largest value this \p discard_block_engine may potentially produce.
107
- */
108
- static const result_type max = base_type::max;
109
-
110
- // constructors and seeding functions
111
-
112
- /*! This constructor constructs a new \p discard_block_engine and constructs
113
- * its \p base_type engine using its null constructor.
114
- */
115
- __host__ __device__
116
- discard_block_engine();
117
-
118
- /*! This constructor constructs a new \p discard_block_engine using
119
- * a given \p base_type engine to initialize its adapted base engine.
120
- *
121
- * \param urng A \p base_type to use to initialize this \p discard_block_engine's
122
- * adapted base engine.
123
- */
124
- __host__ __device__
125
- explicit discard_block_engine(const base_type &urng);
126
-
127
- /*! This constructor initializes a new \p discard_block_engine with a given seed.
128
- *
129
- * \param s The seed used to intialize this \p discard_block_engine's adapted base engine.
130
- */
131
- __host__ __device__
132
- explicit discard_block_engine(result_type s);
133
-
134
- /*! This method initializes the state of this \p discard_block_engine's adapted base engine
135
- * by using its \p default_seed value.
136
- */
137
- __host__ __device__
138
- void seed(void);
139
-
140
- /*! This method initializes the state of this \p discard_block_engine's adapted base engine
141
- * by using the given seed.
142
- *
143
- * \param s The seed with which to intialize this \p discard_block_engine's adapted base engine.
144
- */
145
- __host__ __device__
146
- void seed(result_type s);
147
-
148
- // generating functions
149
-
150
- /*! This member function produces a new random value and updates this \p discard_block_engine's state.
151
- * \return A new random number.
152
- */
153
- __host__ __device__
154
- result_type operator()(void);
155
-
156
- /*! This member function advances this \p discard_block_engine's state a given number of times
157
- * and discards the results.
158
- *
159
- * \param z The number of random values to discard.
160
- * \note This function is provided because an implementation may be able to accelerate it.
161
- */
162
- __host__ __device__
163
- void discard(unsigned long long z);
164
-
165
- // property functions
166
-
167
- /*! This member function returns a const reference to this \p discard_block_engine's
168
- * adapted base engine.
169
- *
170
- * \return A const reference to the base engine this \p discard_block_engine adapts.
171
- */
172
- __host__ __device__
173
- const base_type &base(void) const;
174
-
175
- /*! \cond
176
- */
177
- private:
178
- base_type m_e;
179
- unsigned int m_n;
180
-
181
- friend struct thrust::random::detail::random_core_access;
182
-
183
- __host__ __device__
184
- bool equal(const discard_block_engine &rhs) const;
185
-
186
- template<typename CharT, typename Traits>
187
- std::basic_ostream<CharT,Traits>& stream_out(std::basic_ostream<CharT,Traits> &os) const;
188
-
189
- template<typename CharT, typename Traits>
190
- std::basic_istream<CharT,Traits>& stream_in(std::basic_istream<CharT,Traits> &is);
191
- /*! \endcond
192
- */
193
- }; // end discard_block_engine
194
-
195
-
196
- /*! This function checks two \p discard_block_engines for equality.
197
- * \param lhs The first \p discard_block_engine to test.
198
- * \param rhs The second \p discard_block_engine to test.
199
- * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise.
200
- */
201
- template<typename Engine, size_t p, size_t r>
202
- __host__ __device__
203
- bool operator==(const discard_block_engine<Engine,p,r> &lhs,
204
- const discard_block_engine<Engine,p,r> &rhs);
205
-
206
-
207
- /*! This function checks two \p discard_block_engines for inequality.
208
- * \param lhs The first \p discard_block_engine to test.
209
- * \param rhs The second \p discard_block_engine to test.
210
- * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise.
211
- */
212
- template<typename Engine, size_t p, size_t r>
213
- __host__ __device__
214
- bool operator!=(const discard_block_engine<Engine,p,r> &lhs,
215
- const discard_block_engine<Engine,p,r> &rhs);
216
-
217
-
218
- /*! This function streams a discard_block_engine to a \p std::basic_ostream.
219
- * \param os The \p basic_ostream to stream out to.
220
- * \param e The \p discard_block_engine to stream out.
221
- * \return \p os
222
- */
223
- template<typename Engine, size_t p, size_t r,
224
- typename CharT, typename Traits>
225
- std::basic_ostream<CharT,Traits>&
226
- operator<<(std::basic_ostream<CharT,Traits> &os,
227
- const discard_block_engine<Engine,p,r> &e);
228
-
229
-
230
- /*! This function streams a discard_block_engine in from a std::basic_istream.
231
- * \param is The \p basic_istream to stream from.
232
- * \param e The \p discard_block_engine to stream in.
233
- * \return \p is
234
- */
235
- template<typename Engine, size_t p, size_t r,
236
- typename CharT, typename Traits>
237
- std::basic_istream<CharT,Traits>&
238
- operator>>(std::basic_istream<CharT,Traits> &is,
239
- discard_block_engine<Engine,p,r> &e);
240
-
241
- /*! \} // end random_number_engine_adaptors
242
- */
243
-
244
- } // end random
245
-
246
- // import names into thrust::
247
- using random::discard_block_engine;
248
-
249
- } // end thrust
250
-
251
- #include <thrust/random/detail/discard_block_engine.inl>
252
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/apis/train.py DELETED
@@ -1,185 +0,0 @@
1
- import random
2
- import warnings
3
-
4
- import numpy as np
5
- import torch
6
- from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
7
- from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
8
- Fp16OptimizerHook, OptimizerHook, build_optimizer,
9
- build_runner)
10
- from mmcv.utils import build_from_cfg
11
-
12
- from mmdet.core import DistEvalHook, EvalHook
13
- from mmdet.datasets import (build_dataloader, build_dataset,
14
- replace_ImageToTensor)
15
- from mmdet.utils import get_root_logger
16
- from mmcv_custom.runner import EpochBasedRunnerAmp
17
- try:
18
- import apex
19
- except:
20
- print('apex is not installed')
21
-
22
-
23
- def set_random_seed(seed, deterministic=False):
24
- """Set random seed.
25
-
26
- Args:
27
- seed (int): Seed to be used.
28
- deterministic (bool): Whether to set the deterministic option for
29
- CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
30
- to True and `torch.backends.cudnn.benchmark` to False.
31
- Default: False.
32
- """
33
- random.seed(seed)
34
- np.random.seed(seed)
35
- torch.manual_seed(seed)
36
- torch.cuda.manual_seed_all(seed)
37
- if deterministic:
38
- torch.backends.cudnn.deterministic = True
39
- torch.backends.cudnn.benchmark = False
40
-
41
-
42
- def train_detector(model,
43
- dataset,
44
- cfg,
45
- distributed=False,
46
- validate=False,
47
- timestamp=None,
48
- meta=None):
49
- logger = get_root_logger(cfg.log_level)
50
-
51
- # prepare data loaders
52
- dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
53
- if 'imgs_per_gpu' in cfg.data:
54
- logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
55
- 'Please use "samples_per_gpu" instead')
56
- if 'samples_per_gpu' in cfg.data:
57
- logger.warning(
58
- f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
59
- f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
60
- f'={cfg.data.imgs_per_gpu} is used in this experiments')
61
- else:
62
- logger.warning(
63
- 'Automatically set "samples_per_gpu"="imgs_per_gpu"='
64
- f'{cfg.data.imgs_per_gpu} in this experiments')
65
- cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
66
-
67
- data_loaders = [
68
- build_dataloader(
69
- ds,
70
- cfg.data.samples_per_gpu,
71
- cfg.data.workers_per_gpu,
72
- # cfg.gpus will be ignored if distributed
73
- len(cfg.gpu_ids),
74
- dist=distributed,
75
- seed=cfg.seed) for ds in dataset
76
- ]
77
-
78
- # build optimizer
79
- optimizer = build_optimizer(model, cfg.optimizer)
80
-
81
- # use apex fp16 optimizer
82
- if cfg.optimizer_config.get("type", None) and cfg.optimizer_config["type"] == "DistOptimizerHook":
83
- if cfg.optimizer_config.get("use_fp16", False):
84
- model, optimizer = apex.amp.initialize(
85
- model.cuda(), optimizer, opt_level="O1")
86
- for m in model.modules():
87
- if hasattr(m, "fp16_enabled"):
88
- m.fp16_enabled = True
89
-
90
- # put model on gpus
91
- if distributed:
92
- find_unused_parameters = cfg.get('find_unused_parameters', False)
93
- # Sets the `find_unused_parameters` parameter in
94
- # torch.nn.parallel.DistributedDataParallel
95
- model = MMDistributedDataParallel(
96
- model.cuda(),
97
- device_ids=[torch.cuda.current_device()],
98
- broadcast_buffers=False,
99
- find_unused_parameters=find_unused_parameters)
100
- else:
101
- model = MMDataParallel(
102
- model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
103
-
104
- if 'runner' not in cfg:
105
- cfg.runner = {
106
- 'type': 'EpochBasedRunner',
107
- 'max_epochs': cfg.total_epochs
108
- }
109
- warnings.warn(
110
- 'config is now expected to have a `runner` section, '
111
- 'please set `runner` in your config.', UserWarning)
112
- else:
113
- if 'total_epochs' in cfg:
114
- assert cfg.total_epochs == cfg.runner.max_epochs
115
-
116
- # build runner
117
- runner = build_runner(
118
- cfg.runner,
119
- default_args=dict(
120
- model=model,
121
- optimizer=optimizer,
122
- work_dir=cfg.work_dir,
123
- logger=logger,
124
- meta=meta))
125
-
126
- # an ugly workaround to make .log and .log.json filenames the same
127
- runner.timestamp = timestamp
128
-
129
- # fp16 setting
130
- fp16_cfg = cfg.get('fp16', None)
131
- if fp16_cfg is not None:
132
- optimizer_config = Fp16OptimizerHook(
133
- **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
134
- elif distributed and 'type' not in cfg.optimizer_config:
135
- optimizer_config = OptimizerHook(**cfg.optimizer_config)
136
- else:
137
- optimizer_config = cfg.optimizer_config
138
-
139
- # register hooks
140
- runner.register_training_hooks(cfg.lr_config, optimizer_config,
141
- cfg.checkpoint_config, cfg.log_config,
142
- cfg.get('momentum_config', None))
143
- if distributed:
144
- if isinstance(runner, EpochBasedRunner):
145
- runner.register_hook(DistSamplerSeedHook())
146
-
147
- # register eval hooks
148
- if validate:
149
- # Support batch_size > 1 in validation
150
- val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
151
- if val_samples_per_gpu > 1:
152
- # Replace 'ImageToTensor' to 'DefaultFormatBundle'
153
- cfg.data.val.pipeline = replace_ImageToTensor(
154
- cfg.data.val.pipeline)
155
- val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
156
- val_dataloader = build_dataloader(
157
- val_dataset,
158
- samples_per_gpu=val_samples_per_gpu,
159
- workers_per_gpu=cfg.data.workers_per_gpu,
160
- dist=distributed,
161
- shuffle=False)
162
- eval_cfg = cfg.get('evaluation', {})
163
- eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
164
- eval_hook = DistEvalHook if distributed else EvalHook
165
- runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
166
-
167
- # user-defined hooks
168
- if cfg.get('custom_hooks', None):
169
- custom_hooks = cfg.custom_hooks
170
- assert isinstance(custom_hooks, list), \
171
- f'custom_hooks expect list type, but got {type(custom_hooks)}'
172
- for hook_cfg in cfg.custom_hooks:
173
- assert isinstance(hook_cfg, dict), \
174
- 'Each item in custom_hooks expects dict type, but got ' \
175
- f'{type(hook_cfg)}'
176
- hook_cfg = hook_cfg.copy()
177
- priority = hook_cfg.pop('priority', 'NORMAL')
178
- hook = build_from_cfg(hook_cfg, HOOKS)
179
- runner.register_hook(hook, priority=priority)
180
-
181
- if cfg.resume_from:
182
- runner.resume(cfg.resume_from)
183
- elif cfg.load_from:
184
- runner.load_checkpoint(cfg.load_from)
185
- runner.run(data_loaders, cfg.workflow)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/config.py DELETED
@@ -1,245 +0,0 @@
1
- # --------------------------------------------------------
2
- # Unified Contrastive Learning (UniCL)
3
- # Copyright (c) 2022 Microsoft
4
- # Licensed under The MIT License [see LICENSE for details]
5
- # Written by Jianwei Yang ([email protected])
6
- # Based on Swin Transformer written by Zhe Liu
7
- # --------------------------------------------------------
8
-
9
- import os
10
- import yaml
11
- from yacs.config import CfgNode as CN
12
-
13
- _C = CN()
14
- _C.VERBOSE = False
15
-
16
- # Base config files
17
- _C.BASE = ['']
18
-
19
- # -----------------------------------------------------------------------------
20
- # Data settings
21
- # -----------------------------------------------------------------------------
22
- _C.DATA = CN()
23
- # Batch size for a single GPU, could be overwritten by command line argument
24
- _C.DATA.BATCH_SIZE = 128
25
- # Path to dataset, could be overwritten by command line argument
26
- _C.DATA.DATA_PATH = ''
27
- # Dataset name
28
- _C.DATA.DATASET = 'imagenet'
29
- # Input image size
30
- _C.DATA.IMG_SIZE = 224
31
- # Interpolation to resize image (random, bilinear, bicubic)
32
- _C.DATA.INTERPOLATION = 'bicubic'
33
- # Use zipped dataset instead of folder dataset
34
- # could be overwritten by command line argument
35
- _C.DATA.ZIP_MODE = False
36
- # Cache Data in Memory, could be overwritten by command line argument
37
- _C.DATA.CACHE_MODE = 'part'
38
- # Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
39
- _C.DATA.PIN_MEMORY = True
40
- # Number of data loading threads
41
- _C.DATA.NUM_WORKERS = 8
42
-
43
- # -----------------------------------------------------------------------------
44
- # Model settings
45
- # -----------------------------------------------------------------------------
46
- _C.MODEL = CN()
47
- # Model name
48
- _C.MODEL.NAME = ''
49
- # Checkpoint to resume, could be overwritten by command line argument
50
- _C.MODEL.RESUME = ''
51
- # Number of classes, overwritten in data preparation
52
- _C.MODEL.NUM_CLASSES = 0
53
- # Label Smoothing
54
- _C.MODEL.LABEL_SMOOTHING = 0.1
55
- # Whether load pretrained model
56
- _C.MODEL.PRETRAINED = ''
57
- # Projection dimension
58
- _C.MODEL.DIM_PROJECTION = 512
59
- # Mode specific
60
- _C.MODEL.SPEC = CN(new_allowed=True)
61
- # -----------------------------------------------------------------------------
62
- # Build Image Encoder
63
- # -----------------------------------------------------------------------------
64
- _C.MODEL.IMAGE_ENCODER = CN()
65
- # Image encoder type
66
- _C.MODEL.IMAGE_ENCODER.TYPE = 'swin'
67
- # Input image size
68
- _C.MODEL.IMAGE_ENCODER.IMG_SIZE = 224
69
- # Dropout rate
70
- _C.MODEL.IMAGE_ENCODER.DROP_RATE = 0.0
71
- # Drop path rate
72
- _C.MODEL.IMAGE_ENCODER.DROP_PATH_RATE = 0.1
73
-
74
- # Swin Transformer parameters
75
- _C.MODEL.IMAGE_ENCODER.SWIN = CN()
76
- _C.MODEL.IMAGE_ENCODER.SWIN.PATCH_SIZE = 4
77
- _C.MODEL.IMAGE_ENCODER.SWIN.IN_CHANS = 3
78
- _C.MODEL.IMAGE_ENCODER.SWIN.EMBED_DIM = 96
79
- _C.MODEL.IMAGE_ENCODER.SWIN.DEPTHS = [2, 2, 6, 2]
80
- _C.MODEL.IMAGE_ENCODER.SWIN.NUM_HEADS = [3, 6, 12, 24]
81
- _C.MODEL.IMAGE_ENCODER.SWIN.WINDOW_SIZE = 7
82
- _C.MODEL.IMAGE_ENCODER.SWIN.MLP_RATIO = 4.
83
- _C.MODEL.IMAGE_ENCODER.SWIN.QKV_BIAS = True
84
- _C.MODEL.IMAGE_ENCODER.SWIN.QK_SCALE = None
85
- _C.MODEL.IMAGE_ENCODER.SWIN.APE = False
86
- _C.MODEL.IMAGE_ENCODER.SWIN.PATCH_NORM = True
87
-
88
- # FocalNet parameters
89
- _C.MODEL.IMAGE_ENCODER.FOCAL = CN()
90
- _C.MODEL.IMAGE_ENCODER.FOCAL.PATCH_SIZE = 4
91
- _C.MODEL.IMAGE_ENCODER.FOCAL.IN_CHANS = 3
92
- _C.MODEL.IMAGE_ENCODER.FOCAL.EMBED_DIM = 96
93
- _C.MODEL.IMAGE_ENCODER.FOCAL.DEPTHS = [2, 2, 6, 2]
94
- _C.MODEL.IMAGE_ENCODER.FOCAL.MLP_RATIO = 4.
95
- _C.MODEL.IMAGE_ENCODER.FOCAL.PATCH_NORM = True
96
- _C.MODEL.IMAGE_ENCODER.FOCAL.FOCAL_LEVELS = [2, 2, 2, 2]
97
- _C.MODEL.IMAGE_ENCODER.FOCAL.FOCAL_WINDOWS = [3, 3, 3, 3]
98
- _C.MODEL.IMAGE_ENCODER.FOCAL.FOCAL_FACTORS = [2, 2, 2, 2]
99
- _C.MODEL.IMAGE_ENCODER.FOCAL.USE_CONV_EMBED = False
100
- _C.MODEL.IMAGE_ENCODER.FOCAL.USE_LAYERSCALE = False
101
- _C.MODEL.IMAGE_ENCODER.FOCAL.USE_POSTLN = False
102
-
103
- # -----------------------------------------------------------------------------
104
- # Build Text Encoder
105
- # -----------------------------------------------------------------------------
106
- _C.MODEL.TEXT_ENCODER = CN()
107
-
108
- _C.MODEL.TEXT_ENCODER.NAME = 'transformer'
109
- _C.MODEL.TEXT_ENCODER.LOAD_PRETRAINED = False
110
- _C.MODEL.TEXT_ENCODER.PRETRAINED = ''
111
- _C.MODEL.TEXT_ENCODER.TOKENIZER = 'clip'
112
- _C.MODEL.TEXT_ENCODER.CONTEXT_LENGTH = 77
113
- _C.MODEL.TEXT_ENCODER.WIDTH = 1024
114
- _C.MODEL.TEXT_ENCODER.HEADS = 16
115
- _C.MODEL.TEXT_ENCODER.LAYERS = 12
116
- _C.MODEL.TEXT_ENCODER.AUTOGRESSIVE = True
117
-
118
- # -----------------------------------------------------------------------------
119
- # Training settings
120
- # -----------------------------------------------------------------------------
121
- _C.TRAIN = CN()
122
- _C.TRAIN.START_EPOCH = 0
123
- _C.TRAIN.EPOCHS = 32
124
- _C.TRAIN.WARMUP_EPOCHS = 5
125
- _C.TRAIN.WEIGHT_DECAY = 0.1
126
- _C.TRAIN.BASE_LR = 5e-4
127
- _C.TRAIN.WARMUP_LR = 5e-7
128
- _C.TRAIN.MIN_LR = 5e-6
129
- # Clip gradient norm
130
- _C.TRAIN.CLIP_GRAD = 5.0
131
- # Auto resume from latest checkpoint
132
- _C.TRAIN.AUTO_RESUME = True
133
- # Gradient accumulation steps
134
- # could be overwritten by command line argument
135
- _C.TRAIN.ACCUMULATION_STEPS = 0
136
- # Whether to use gradient checkpointing to save memory
137
- # could be overwritten by command line argument
138
- _C.TRAIN.USE_CHECKPOINT = False
139
-
140
- # LR scheduler
141
- _C.TRAIN.LR_SCHEDULER = CN()
142
- _C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
143
- # Epoch interval to decay LR, used in StepLRScheduler
144
- _C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
145
- # LR decay rate, used in StepLRScheduler
146
- _C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
147
-
148
- # Optimizer
149
- _C.TRAIN.OPTIMIZER = CN()
150
- _C.TRAIN.OPTIMIZER.NAME = 'adamw'
151
- # Optimizer Epsilon
152
- _C.TRAIN.OPTIMIZER.EPS = 1e-8
153
- # Optimizer Betas
154
- _C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
155
- # SGD momentum
156
- _C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
157
-
158
- # -----------------------------------------------------------------------------
159
- # Augmentation settings
160
- # -----------------------------------------------------------------------------
161
- _C.AUG = CN()
162
- # Color jitter factor
163
- _C.AUG.COLOR_JITTER = 0.4
164
- # Use AutoAugment policy. "v0" or "original"
165
- _C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
166
- # Random erase prob
167
- _C.AUG.REPROB = 0.25
168
- # Random erase mode
169
- _C.AUG.REMODE = 'pixel'
170
- # Random erase count
171
- _C.AUG.RECOUNT = 1
172
- # Mixup alpha, mixup enabled if > 0
173
- _C.AUG.MIXUP = 0.8
174
- # Cutmix alpha, cutmix enabled if > 0
175
- _C.AUG.CUTMIX = 1.0
176
- # Cutmix min/max ratio, overrides alpha and enables cutmix if set
177
- _C.AUG.CUTMIX_MINMAX = None
178
- # Probability of performing mixup or cutmix when either/both is enabled
179
- _C.AUG.MIXUP_PROB = 1.0
180
- # Probability of switching to cutmix when both mixup and cutmix enabled
181
- _C.AUG.MIXUP_SWITCH_PROB = 0.5
182
- # How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
183
- _C.AUG.MIXUP_MODE = 'batch'
184
-
185
- # -----------------------------------------------------------------------------
186
- # Testing settings
187
- # -----------------------------------------------------------------------------
188
- _C.TEST = CN()
189
- # Whether to use center crop when testing
190
- _C.TEST.CROP = True
191
-
192
- # -----------------------------------------------------------------------------
193
- # Misc
194
- # -----------------------------------------------------------------------------
195
- # Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2')
196
- # overwritten by command line argument
197
- _C.AMP_OPT_LEVEL = ''
198
- # Path to output folder, overwritten by command line argument
199
- _C.OUTPUT = ''
200
- # Tag of experiment, overwritten by command line argument
201
- _C.TAG = 'default'
202
- # Frequency to save checkpoint
203
- _C.SAVE_FREQ = 1
204
- # Frequency to logging info
205
- _C.PRINT_FREQ = 100
206
- # Fixed random seed
207
- _C.SEED = 0
208
- # Perform evaluation only, overwritten by command line argument
209
- _C.EVAL_MODE = False
210
- # Test throughput only, overwritten by command line argument
211
- _C.THROUGHPUT_MODE = False
212
- # Debug only so that skip dataloader initialization, overwritten by command line argument
213
- _C.DEBUG_MODE = False
214
- # local rank for DistributedDataParallel, given by command line argument
215
- _C.LOCAL_RANK = 0
216
-
217
-
218
- def _update_config_from_file(config, cfg_file):
219
- config.defrost()
220
- with open(cfg_file, 'r') as f:
221
- yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
222
-
223
- for cfg in yaml_cfg.setdefault('BASE', ['']):
224
- if cfg:
225
- _update_config_from_file(
226
- config, os.path.join(os.path.dirname(cfg_file), cfg)
227
- )
228
- print('=> merge config from {}'.format(cfg_file))
229
- config.merge_from_file(cfg_file)
230
- config.freeze()
231
-
232
-
233
- def update_config(config, args):
234
- _update_config_from_file(config, args.cfg)
235
- config.freeze()
236
-
237
-
238
- def get_config(args):
239
- """Get a yacs CfgNode object with default values."""
240
- # Return a clone so that the defaults will not be altered
241
- # This is for the "local variable" use pattern
242
- config = _C.clone()
243
- update_config(config, args)
244
-
245
- return config
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/web_requests.py DELETED
@@ -1,190 +0,0 @@
1
- """Browse a webpage and summarize it using the LLM model"""
2
- from __future__ import annotations
3
-
4
- from urllib.parse import urljoin, urlparse
5
-
6
- import requests
7
- from bs4 import BeautifulSoup
8
- from requests import Response
9
- from requests.compat import urljoin
10
-
11
- from autogpt.config import Config
12
- from autogpt.memory import get_memory
13
- from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
14
-
15
- CFG = Config()
16
- memory = get_memory(CFG)
17
-
18
- session = requests.Session()
19
- session.headers.update({"User-Agent": CFG.user_agent})
20
-
21
-
22
- def is_valid_url(url: str) -> bool:
23
- """Check if the URL is valid
24
-
25
- Args:
26
- url (str): The URL to check
27
-
28
- Returns:
29
- bool: True if the URL is valid, False otherwise
30
- """
31
- try:
32
- result = urlparse(url)
33
- return all([result.scheme, result.netloc])
34
- except ValueError:
35
- return False
36
-
37
-
38
- def sanitize_url(url: str) -> str:
39
- """Sanitize the URL
40
-
41
- Args:
42
- url (str): The URL to sanitize
43
-
44
- Returns:
45
- str: The sanitized URL
46
- """
47
- return urljoin(url, urlparse(url).path)
48
-
49
-
50
- def check_local_file_access(url: str) -> bool:
51
- """Check if the URL is a local file
52
-
53
- Args:
54
- url (str): The URL to check
55
-
56
- Returns:
57
- bool: True if the URL is a local file, False otherwise
58
- """
59
- local_prefixes = [
60
- "file:///",
61
- "file://localhost/",
62
- "file://localhost",
63
- "http://localhost",
64
- "http://localhost/",
65
- "https://localhost",
66
- "https://localhost/",
67
- "http://2130706433",
68
- "http://2130706433/",
69
- "https://2130706433",
70
- "https://2130706433/",
71
- "http://127.0.0.1/",
72
- "http://127.0.0.1",
73
- "https://127.0.0.1/",
74
- "https://127.0.0.1",
75
- "https://0.0.0.0/",
76
- "https://0.0.0.0",
77
- "http://0.0.0.0/",
78
- "http://0.0.0.0",
79
- "http://0000",
80
- "http://0000/",
81
- "https://0000",
82
- "https://0000/",
83
- ]
84
- return any(url.startswith(prefix) for prefix in local_prefixes)
85
-
86
-
87
- def get_response(
88
- url: str, timeout: int = 10
89
- ) -> tuple[None, str] | tuple[Response, None]:
90
- """Get the response from a URL
91
-
92
- Args:
93
- url (str): The URL to get the response from
94
- timeout (int): The timeout for the HTTP request
95
-
96
- Returns:
97
- tuple[None, str] | tuple[Response, None]: The response and error message
98
-
99
- Raises:
100
- ValueError: If the URL is invalid
101
- requests.exceptions.RequestException: If the HTTP request fails
102
- """
103
- try:
104
- # Restrict access to local files
105
- if check_local_file_access(url):
106
- raise ValueError("Access to local files is restricted")
107
-
108
- # Most basic check if the URL is valid:
109
- if not url.startswith("http://") and not url.startswith("https://"):
110
- raise ValueError("Invalid URL format")
111
-
112
- sanitized_url = sanitize_url(url)
113
-
114
- response = session.get(sanitized_url, timeout=timeout)
115
-
116
- # Check if the response contains an HTTP error
117
- if response.status_code >= 400:
118
- return None, f"Error: HTTP {str(response.status_code)} error"
119
-
120
- return response, None
121
- except ValueError as ve:
122
- # Handle invalid URL format
123
- return None, f"Error: {str(ve)}"
124
-
125
- except requests.exceptions.RequestException as re:
126
- # Handle exceptions related to the HTTP request
127
- # (e.g., connection errors, timeouts, etc.)
128
- return None, f"Error: {str(re)}"
129
-
130
-
131
- def scrape_text(url: str) -> str:
132
- """Scrape text from a webpage
133
-
134
- Args:
135
- url (str): The URL to scrape text from
136
-
137
- Returns:
138
- str: The scraped text
139
- """
140
- response, error_message = get_response(url)
141
- if error_message:
142
- return error_message
143
- if not response:
144
- return "Error: Could not get response"
145
-
146
- soup = BeautifulSoup(response.text, "html.parser")
147
-
148
- for script in soup(["script", "style"]):
149
- script.extract()
150
-
151
- text = soup.get_text()
152
- lines = (line.strip() for line in text.splitlines())
153
- chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
154
- text = "\n".join(chunk for chunk in chunks if chunk)
155
-
156
- return text
157
-
158
-
159
- def scrape_links(url: str) -> str | list[str]:
160
- """Scrape links from a webpage
161
-
162
- Args:
163
- url (str): The URL to scrape links from
164
-
165
- Returns:
166
- str | list[str]: The scraped links
167
- """
168
- response, error_message = get_response(url)
169
- if error_message:
170
- return error_message
171
- if not response:
172
- return "Error: Could not get response"
173
- soup = BeautifulSoup(response.text, "html.parser")
174
-
175
- for script in soup(["script", "style"]):
176
- script.extract()
177
-
178
- hyperlinks = extract_hyperlinks(soup, url)
179
-
180
- return format_hyperlinks(hyperlinks)
181
-
182
-
183
- def create_message(chunk, question):
184
- """Create a message for the user to summarize a chunk of text"""
185
- return {
186
- "role": "user",
187
- "content": f'"""{chunk}""" Using the above text, answer the following'
188
- f' question: "{question}" -- if the question cannot be answered using the'
189
- " text, summarize the text.",
190
- }